diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 000000000..f0957c311 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,2 @@ +packages/grpc-js-xds/src/generated/** linguist-generated +packages/grpc-js-xds/interop/generated/** linguist-generated diff --git a/.github/workflows/grpc-tools-build.yml b/.github/workflows/grpc-tools-build.yml index f32a688c4..64ee81212 100644 --- a/.github/workflows/grpc-tools-build.yml +++ b/.github/workflows/grpc-tools-build.yml @@ -8,6 +8,9 @@ on: branches: - master +permissions: + contents: read # to fetch code (actions/checkout) + jobs: linux_build: name: Linux grpc-tools Build diff --git a/.gitignore b/.gitignore index fce837e45..8e63d8bf0 100644 --- a/.gitignore +++ b/.gitignore @@ -5,6 +5,7 @@ node_modules/ npm-debug.log yarn-error.log yarn.lock +artifacts # Emacs temp files *~ @@ -15,6 +16,7 @@ yarn.lock reports/ package-lock.json +pnpm-lock.yaml # Test generated files coverage diff --git a/.gitmodules b/.gitmodules index d3c1ebaf6..f54fa6afc 100644 --- a/.gitmodules +++ b/.gitmodules @@ -10,12 +10,12 @@ [submodule "packages/grpc-js-xds/deps/envoy-api"] path = packages/grpc-js-xds/deps/envoy-api url = https://github.com/envoyproxy/data-plane-api.git -[submodule "packages/grpc-js-xds/deps/udpa"] - path = packages/grpc-js-xds/deps/udpa - url = https://github.com/cncf/udpa.git [submodule "packages/grpc-js-xds/deps/googleapis"] path = packages/grpc-js-xds/deps/googleapis url = https://github.com/googleapis/googleapis.git [submodule "packages/grpc-js-xds/deps/protoc-gen-validate"] path = packages/grpc-js-xds/deps/protoc-gen-validate url = https://github.com/envoyproxy/protoc-gen-validate.git +[submodule "packages/grpc-js-xds/deps/xds"] + path = packages/grpc-js-xds/deps/xds + url = https://github.com/cncf/xds.git diff --git a/MAINTAINERS.md b/MAINTAINERS.md index c9e0522a1..bdcef2e26 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -8,16 +8,17 @@ See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIB for general contribution guidelines. ## Maintainers (in alphabetical order) - - [jiangtaoli2016](https://github.com/jiangtaoli2016), Google Inc. + - [jtattermusch](https://github.com/jtattermusch), Google Inc. - [murgatroid99](https://github.com/murgatroid99), Google Inc. - [nicolasnoble](https://github.com/nicolasnoble), Google Inc. - - [ofrobots](https://github.com/ofrobots), Google Inc. - [srini100](https://github.com/srini100), Google Inc. - - [WeiranFang](https://github.com/WeiranFang), Google Inc. - [wenbozhu](https://github.com/wenbozhu), Google Inc. ## Emeritus Maintainers (in alphabetical order) + - [jiangtaoli2016](https://github.com/jiangtaoli2016), Google Inc. - [kjin](https://github.com/kjin), Google Inc. - [matt-kwong](https://github.com/matt-kwong), Google Inc. - \ No newline at end of file + - [ofrobots](https://github.com/ofrobots), Google Inc. + - [WeiranFang](https://github.com/WeiranFang), Google Inc. 
+ diff --git a/PACKAGE-COMPARISON.md b/PACKAGE-COMPARISON.md index fa0ea319e..e6cee8934 100644 --- a/PACKAGE-COMPARISON.md +++ b/PACKAGE-COMPARISON.md @@ -1,6 +1,6 @@ # Feature comparison of `grpc` and `@grpc/grpc-js` packages -Feature | `grpc` | `@grpc/grpc-js` +Feature | `grpc` (deprecated) | `@grpc/grpc-js` --------|--------|---------- Client | :heavy_check_mark: | :heavy_check_mark: Server | :heavy_check_mark: | :heavy_check_mark: @@ -9,41 +9,24 @@ Streaming RPCs | :heavy_check_mark: | :heavy_check_mark: Deadlines | :heavy_check_mark: | :heavy_check_mark: Cancellation | :heavy_check_mark: | :heavy_check_mark: Automatic Reconnection | :heavy_check_mark: | :heavy_check_mark: -Per-message Compression | :heavy_check_mark: | only for response messages +Per-message Compression | :heavy_check_mark: | :heavy_check_mark: (except messages sent by the server) Channel State | :heavy_check_mark: | :heavy_check_mark: JWT Access and Service Account Credentials | provided by the [Google Auth Library](https://www.npmjs.com/package/google-auth-library) | provided by the [Google Auth Library](https://www.npmjs.com/package/google-auth-library) Interceptors | :heavy_check_mark: | :heavy_check_mark: Connection Keepalives | :heavy_check_mark: | :heavy_check_mark: HTTP Connect Support | :heavy_check_mark: | :heavy_check_mark: -Retries | :heavy_check_mark: | :x: +Retries | :heavy_check_mark: (without hedging) | :heavy_check_mark: (including hedging) Stats/tracing/monitoring | :heavy_check_mark: | :x: -Load Balancing | :heavy_check_mark: | Pick first and round robin +Load Balancing | :heavy_check_mark: | :heavy_check_mark: Initial Metadata Options | :heavy_check_mark: | only `waitForReady` Other Properties | `grpc` | `@grpc/grpc-js` -----------------|--------|---------------- Pure JavaScript Code | :x: | :heavy_check_mark: -Supported Node Versions | >= 4 | ^8.13.0 or >=10.10.0 -Supported Electron Versions | All | >= 3 +Supported Node Versions | >= 4 and <=14 | ^8.13.0 or >=10.10.0 +Supported Electron Versions | <=11.2 | >= 3 Supported Platforms | Linux, Windows, MacOS | All Supported Architectures | x86, x86-64, ARM7+ | All -In addition, all channel arguments defined in [this header file](https://github.com/grpc/grpc/blob/master/include/grpc/impl/codegen/grpc_types.h) are handled by the `grpc` library. Of those, the following are handled by the `@grpc/grpc-js` library: - - - `grpc.ssl_target_name_override` - - `grpc.primary_user_agent` - - `grpc.secondary_user_agent` - - `grpc.default_authority` - - `grpc.keepalive_time_ms` - - `grpc.keepalive_timeout_ms` - - `grpc.keepalive_permit_without_calls` - - `grpc.service_config` - - `grpc.max_concurrent_streams` - - `grpc.initial_reconnect_backoff_ms` - - `grpc.max_reconnect_backoff_ms` - - `grpc.use_local_subchannel_pool` - - `grpc.max_send_message_length` - - `grpc.max_receive_message_length` - - `grpc.enable_http_proxy` - - `channelOverride` - - `channelFactoryOverride` +In addition, all channel arguments defined in [this header file](https://github.com/grpc/grpc/blob/master/include/grpc/impl/codegen/grpc_types.h) are handled by the `grpc` library. +Of those, a subset are handled by the `@grpc/grpc-js` library. See [the README](https://github.com/grpc/grpc-node/blob/master/packages/grpc-js/README.md#supported-channel-options) for `@grpc/grpc-js` for the list of supported channel options. 
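For context on what those channel options look like in application code, here is a minimal sketch (editorial illustration, not part of this diff; the proto path and option values are assumptions). In `@grpc/grpc-js`, channel options are passed as the third argument of a client constructor, and the option names below come from the list that this change moves into the grpc-js README:

```js
// Hypothetical snippet showing supported @grpc/grpc-js channel options in use.
// The option names appear in the list referenced above; the values and the
// proto path are examples only.
const grpc = require('@grpc/grpc-js');
const protoLoader = require('@grpc/proto-loader');

// Assumed path to a Greeter proto like the one used in the examples directory.
const packageDefinition = protoLoader.loadSync(__dirname + '/examples/protos/helloworld.proto');
const helloProto = grpc.loadPackageDefinition(packageDefinition).helloworld;

// Channel options are supplied as the third constructor argument.
const client = new helloProto.Greeter(
  'localhost:50051',
  grpc.credentials.createInsecure(),
  {
    'grpc.keepalive_time_ms': 120000,                  // ping the server every 2 minutes
    'grpc.keepalive_timeout_ms': 20000,                // wait 20s for the ping ack
    'grpc.max_receive_message_length': 4 * 1024 * 1024 // 4 MB receive limit
  }
);
```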
diff --git a/README.md b/README.md index 159fbab4f..de4cc752b 100644 --- a/README.md +++ b/README.md @@ -5,21 +5,21 @@ For a comparison of the features available in these two libraries, see [this document](https://github.com/grpc/grpc-node/tree/master/PACKAGE-COMPARISON.md) -### C-based Client and Server +### Pure JavaScript Client and Server -Directory: [`packages/grpc-native-core`](https://github.com/grpc/grpc-node/tree/grpc@1.24.x/packages/grpc-native-core) (lives in the `grpc@1.24.x` branch) (see here for installation information) +Directory: [`packages/grpc-js`](https://github.com/grpc/grpc-node/tree/master/packages/grpc-js) -npm package: [grpc](https://www.npmjs.com/package/grpc). +npm package: [@grpc/grpc-js](https://www.npmjs.com/package/@grpc/grpc-js) -This is the existing, feature-rich implementation of gRPC using a C++ addon. It works on all LTS versions of Node.js on most platforms that Node.js runs on. +This library implements the core functionality of gRPC purely in JavaScript, without a C++ addon. It works on the latest versions of Node.js on all platforms that Node.js runs on. -### Pure JavaScript Client +### C-based Client and Server (deprecated) -Directory: [`packages/grpc-js`](https://github.com/grpc/grpc-node/tree/master/packages/grpc-js) +Directory: [`packages/grpc-native-core`](https://github.com/grpc/grpc-node/tree/grpc@1.24.x/packages/grpc-native-core) (lives in the `grpc@1.24.x` branch) (see here for installation information) -npm package: [@grpc/grpc-js](https://www.npmjs.com/package/@grpc/grpc-js) +npm package: [grpc](https://www.npmjs.com/package/grpc). -This library implements the core functionality of gRPC purely in JavaScript, without a C++ addon. It works on the latest version of Node.js on all platforms that Node.js runs on. +This is the deprecated implementation of gRPC using a C++ addon. It works on versions of Node.js up to 14 on most platforms that Node.js runs on. ## Other Packages @@ -46,3 +46,11 @@ Directory: [`packages/grpc-health-check`](https://github.com/grpc/grpc-node/tree npm package: [grpc-health-check](https://www.npmjs.com/package/grpc-health-check) Health check service for gRPC servers. + +### gRPC Reflection API Service + +Directory: [`packages/grpc-reflection`](https://github.com/grpc/grpc-node/tree/master/packages/grpc-reflection) + +npm package: [@grpc/reflection](https://www.npmjs.com/package/@grpc/reflection) + +Reflection API service for gRPC servers. diff --git a/TROUBLESHOOTING.md b/TROUBLESHOOTING.md new file mode 100644 index 000000000..bfcf94edc --- /dev/null +++ b/TROUBLESHOOTING.md @@ -0,0 +1,38 @@ +# Troubleshooting grpc-js + +This guide is for troubleshooting the `grpc-js` library for Node.js. + +## Enabling extra logging and tracing + +Extra logging can be very useful for diagnosing problems. `grpc-js` supports +the `GRPC_VERBOSITY` and `GRPC_TRACE` environment variables that can be used to increase the amount of information +that gets printed to stderr. + +## GRPC_VERBOSITY + +`GRPC_VERBOSITY` is used to set the minimum level of log messages printed by gRPC (supported values are `DEBUG`, `INFO` and `ERROR`). If this environment variable is unset, only `ERROR` logs will be printed. + +## GRPC_TRACE + +`GRPC_TRACE` can be used to enable extra logging for some internal gRPC components. Enabling the right traces can be invaluable +for diagnosing what is going wrong when things aren't working as intended. Possible values for `GRPC_TRACE` are listed in [Environment Variables Overview](doc/environment_variables.md).
+Multiple traces can be enabled at once (use comma as separator). + +``` +# Enable debug logs for an application +GRPC_VERBOSITY=debug ./helloworld_application_using_grpc +``` + +``` +# Print information about channel state changes +GRPC_VERBOSITY=debug GRPC_TRACE=connectivity_state ./helloworld_application_using_grpc +``` + +``` +# Print info from 3 different tracers, including tracing logs with log level DEBUG +GRPC_VERBOSITY=debug GRPC_TRACE=channel,subchannel,call_stream ./helloworld_application_using_grpc +``` + +Please note that the `GRPC_TRACE` environment variable has nothing to do with gRPC's "tracing" feature (i.e. tracing RPCs in a +microservice environment to gain insight into how requests are processed by a deployment); it is merely used to enable printing +of extra logs. diff --git a/doc/environment_variables.md b/doc/environment_variables.md new file mode 100644 index 000000000..1b5ad26af --- /dev/null +++ b/doc/environment_variables.md @@ -0,0 +1,64 @@ +# grpc-js environment variables + +`@grpc/grpc-js` exposes some configuration as environment variables that +can be set. + +*For the legacy `grpc` library, the environment variables are documented +[in the main gRPC repository](https://github.com/grpc/grpc/blob/master/doc/environment_variables.md)* + +* grpc_proxy, https_proxy, http_proxy + The URI of the proxy to use for HTTP CONNECT support. These variables are + checked in order, and the first one that has a value is used. + +* no_grpc_proxy, no_proxy + A comma-separated list of hostnames to connect to without using a proxy even + if a proxy is set. These variables are checked in order, and the first one + that has a value is used. + +* GRPC_SSL_CIPHER_SUITES + A colon-separated list of cipher suites to use with OpenSSL. + Defaults to the Node.js defaults. + +* GRPC_DEFAULT_SSL_ROOTS_FILE_PATH + PEM file to load SSL roots from + +* GRPC_NODE_TRACE, GRPC_TRACE + A comma-separated list of tracers that provide additional insight into how + grpc-js is processing requests via debug logs. Available tracers include: + - `call_stream` - Traces client request internals + - `channel` - Traces channel events + - `connectivity_state` - Traces channel connectivity state changes + - `dns_resolver` - Traces DNS resolution + - `ip_resolver` - Traces IPv4/v6 resolution + - `pick_first` - Traces the pick first load balancing policy + - `proxy` - Traces proxy operations + - `resolving_load_balancer` - Traces the resolving load balancer + - `round_robin` - Traces the round robin load balancing policy + - `server` - Traces high-level server events + - `server_call` - Traces server handling of individual requests + - `subchannel` - Traces subchannel connectivity state and errors + - `subchannel_refcount` - Traces subchannel refcount changes. Includes per-call logs. + - `subchannel_flowctrl` - Traces HTTP/2 flow control. Includes per-call logs. + - `subchannel_internals` - Traces HTTP/2 session state. Includes per-call logs. + - `channel_stacktrace` - Traces channel construction events with stack traces.
+ - `keepalive` - Traces gRPC keepalive pings + - `outlier_detection` - Traces outlier detection events + + The following tracers are added by the `@grpc/grpc-js-xds` library: + - `cds_balancer` - Traces the CDS load balancing policy + - `eds_balancer` - Traces the EDS load balancing policy + - `priority` - Traces the priority load balancing policy + - `weighted_target` - Traces the weighted target load balancing policy + - `xds_client` - Traces the xDS Client + - `xds_cluster_manager` - Traces the xDS cluster manager load balancing policy + - `xds_resolver` - Traces the xDS name resolver + + 'all' can additionally be used to turn all traces on. + Individual traces can be disabled by prefixing them with '-'. + +* GRPC_NODE_VERBOSITY, GRPC_VERBOSITY + Default gRPC logging verbosity - one of: + - DEBUG - log all gRPC messages + - INFO - log INFO and ERROR messages + - ERROR - log only errors (default) + - NONE - won't log anything diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 000000000..9958b17e9 --- /dev/null +++ b/examples/README.md @@ -0,0 +1,49 @@ +gRPC in 3 minutes (Node.js) +=========================== + +PREREQUISITES +------------- + +- `node`: This requires Node 8.13.0 or greater. + +INSTALL +------- + + ```sh + $ # Get the gRPC repository + $ export REPO_ROOT=grpc-node # REPO root can be any directory of your choice + $ git clone -b RELEASE_TAG_HERE https://github.com/grpc/grpc-node $REPO_ROOT + $ cd $REPO_ROOT + + $ cd examples + $ npm install + ``` + +TRY IT! +------- + +There are two ways to generate the code needed to work with protocol buffers in Node.js: one approach uses [Protobuf.js](https://github.com/dcodeIO/ProtoBuf.js/) to dynamically generate the code at runtime, while the other uses code statically generated using the protocol buffer compiler `protoc`. The examples behave identically, and either server can be used with either client. + + - Run the server + + ```sh + $ # from this directory + $ node ./helloworld/dynamic_codegen/greeter_server.js & + $ # OR + $ node ./helloworld/static_codegen/greeter_server.js & + ``` + + - Run the client + + ```sh + $ # from this directory + $ node ./helloworld/dynamic_codegen/greeter_client.js + $ # OR + $ node ./helloworld/static_codegen/greeter_client.js + ``` + +TUTORIAL +-------- +You can find a more detailed tutorial in [gRPC Basics: Node.js][]. + +[gRPC Basics: Node.js]:https://grpc.io/docs/languages/node/basics diff --git a/examples/cancellation/README.md b/examples/cancellation/README.md new file mode 100644 index 000000000..5dcd76c09 --- /dev/null +++ b/examples/cancellation/README.md @@ -0,0 +1,18 @@ +# Cancellation + +This example shows how clients can cancel in-flight RPCs by cancelling the +call object returned by the method invocation. The client will receive a status +with code `CANCELLED` and the server handler's call object will emit either a +`'cancelled'` event or an `'end'` event. + +## Start the server + +``` +node server.js +``` + +## Run the client + +``` +node client.js +``` diff --git a/examples/cancellation/client.js b/examples/cancellation/client.js new file mode 100644 index 000000000..c76487dfe --- /dev/null +++ b/examples/cancellation/client.js @@ -0,0 +1,64 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +const grpc = require('@grpc/grpc-js'); +const protoLoader = require('@grpc/proto-loader'); +const parseArgs = require('minimist'); + +const PROTO_PATH = __dirname + '/../protos/echo.proto'; + +const packageDefinition = protoLoader.loadSync( + PROTO_PATH, + {keepCase: true, + longs: String, + enums: String, + defaults: true, + oneofs: true + }); +const echoProto = grpc.loadPackageDefinition(packageDefinition).grpc.examples.echo; + +function main() { + let argv = parseArgs(process.argv.slice(2), { + string: 'target', + default: {target: 'localhost:50052'} + }); + const client = new echoProto.Echo(argv.target, grpc.credentials.createInsecure()); + const call = client.bidirectionalStreamingEcho(); + const EXPECTED_MESSAGES = 2; + let receivedMessages = 0; + call.on('data', value => { + console.log(`received message "${value.message}"`) + receivedMessages += 1; + if (receivedMessages >= EXPECTED_MESSAGES) { + console.log('cancelling call'); + call.cancel(); + } + }); + call.on('status', statusObject => { + console.log(`received call status with code ${grpc.status[statusObject.code]}`); + }); + call.on('error', error => { + console.log(`received error ${error}`); + }) + console.log('sending message "hello"'); + call.write({message: 'hello'}); + console.log('sending message "world"') + call.write({message: 'world'}); +} + +main(); diff --git a/examples/cancellation/server.js b/examples/cancellation/server.js new file mode 100644 index 000000000..d68033d42 --- /dev/null +++ b/examples/cancellation/server.js @@ -0,0 +1,68 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +const grpc = require('@grpc/grpc-js'); +const protoLoader = require('@grpc/proto-loader'); +const parseArgs = require('minimist'); + +const PROTO_PATH = __dirname + '/../protos/echo.proto'; + +const packageDefinition = protoLoader.loadSync( + PROTO_PATH, + {keepCase: true, + longs: String, + enums: String, + defaults: true, + oneofs: true + }); +const echoProto = grpc.loadPackageDefinition(packageDefinition).grpc.examples.echo; + +function bidirectionalStreamingEcho(call) { + call.on('data', value => { + const message = value.message; + console.log(`echoing message "${message}"`); + call.write({message: message}); + }); + // Either 'end' or 'cancelled' will be emitted when the call is cancelled + call.on('end', () => { + console.log('server received end event') + call.end(); + }); + call.on('cancelled', () => { + console.log('server received cancelled event'); + }); +} + +const serviceImplementation = { + bidirectionalStreamingEcho +} + +function main() { + const argv = parseArgs(process.argv.slice(2), { + string: 'port', + default: {port: '50052'} + }); + const server = new grpc.Server(); + server.addService(echoProto.Echo.service, serviceImplementation); + server.bindAsync(`0.0.0.0:${argv.port}`, grpc.ServerCredentials.createInsecure(), () => { + server.start(); + }); + client = new echoProto.Echo(`localhost:${argv.port}`, grpc.credentials.createInsecure()); +} + +main(); diff --git a/examples/deadline/client.js b/examples/deadline/client.js new file mode 100644 index 000000000..84143ac02 --- /dev/null +++ b/examples/deadline/client.js @@ -0,0 +1,92 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +const grpc = require('@grpc/grpc-js'); +const protoLoader = require('@grpc/proto-loader'); +const parseArgs = require('minimist'); + +const PROTO_PATH = __dirname + '/../protos/echo.proto'; + +const packageDefinition = protoLoader.loadSync( + PROTO_PATH, + {keepCase: true, + longs: String, + enums: String, + defaults: true, + oneofs: true + }); +const echoProto = grpc.loadPackageDefinition(packageDefinition).grpc.examples.echo; + +function unaryCall(client, requestId, message, expectedCode) { + return new Promise((resolve, reject) => { + const deadline = new Date(); + deadline.setSeconds(deadline.getSeconds() + 1); + client.unaryEcho({message: message}, {deadline}, (error, value) => { + let code; + if (error) { + code = error.code; + } else { + code = grpc.status.OK; + } + console.log(`[${requestId}] wanted = ${grpc.status[expectedCode]} got = ${grpc.status[code]}`); + resolve(); + }); + }); +} + +function streamingCall(client, requestId, message, expectedCode) { + return new Promise((resolve, reject) => { + const deadline = new Date(); + deadline.setSeconds(deadline.getSeconds() + 1); + const call = client.bidirectionalStreamingEcho({deadline}); + call.on('data', () => { + // Consume all response messages + }); + call.on('status', status => { + console.log(`[${requestId}] wanted = ${grpc.status[expectedCode]} got = ${grpc.status[status.code]}`); + resolve(); + }); + call.on('error', () => { + // Ignore error event + }); + call.write({message}); + call.end(); + }); +} + +async function main() { + let argv = parseArgs(process.argv.slice(2), { + string: 'target', + default: {target: 'localhost:50052'} + }); + const client = new echoProto.Echo(argv.target, grpc.credentials.createInsecure()); + // A successful request + await unaryCall(client, 1, 'world', grpc.status.OK); + // Exceeds deadline + await unaryCall(client, 2, 'delay', grpc.status.DEADLINE_EXCEEDED); + // A successful request with propagated deadline + await unaryCall(client, 3, '[propagate me]world', grpc.status.OK); + // Exceeds propagated deadline + await unaryCall(client, 4, '[propagate me][propagate me]world', grpc.status.DEADLINE_EXCEEDED); + // Receives a response from the stream successfully + await streamingCall(client, 5, '[propagate me]world', grpc.status.OK); + // Exceeds propagated deadline before receiving a response + await streamingCall(client, 6, '[propagate me][propagate me]world', grpc.status.DEADLINE_EXCEEDED); +} + +main(); diff --git a/examples/deadline/server.js b/examples/deadline/server.js new file mode 100644 index 000000000..1618a9d5a --- /dev/null +++ b/examples/deadline/server.js @@ -0,0 +1,109 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +const grpc = require('@grpc/grpc-js'); +const protoLoader = require('@grpc/proto-loader'); +const parseArgs = require('minimist'); + +const PROTO_PATH = __dirname + '/../protos/echo.proto'; + +const packageDefinition = protoLoader.loadSync( + PROTO_PATH, + {keepCase: true, + longs: String, + enums: String, + defaults: true, + oneofs: true + }); +const echoProto = grpc.loadPackageDefinition(packageDefinition).grpc.examples.echo; + +const PROPAGATE_PREFIX = '[propagate me]'; + +let client; + +function unaryEcho(call, callback) { + const message = call.request.message; + if (message.startsWith(PROPAGATE_PREFIX)) { + setTimeout(() => { + client.unaryEcho({message: message.slice(PROPAGATE_PREFIX.length)}, {parent: call}, callback); + }, 800); + return; + } else if (message === 'delay') { + setTimeout(() => { + callback(null, call.request); + }, 1500); + } else { + callback(null, call.request); + } +} + +function bidirectionalStreamingEcho(call) { + let lastMessage = null; + call.on('data', value => { + const message = value.message; + lastMessage = message; + call.pause(); + if (message.startsWith(PROPAGATE_PREFIX)) { + setTimeout(() => { + client.unaryEcho({message: message.slice(PROPAGATE_PREFIX.length)}, {parent: call}, (error, response) => { + call.resume(); + if (error) { + call.emit('error', error); + return; + } + call.write(response); + }); + }, 800); + return; + } else if (message === 'delay') { + setTimeout(() => { + call.write(value); + call.resume(); + }, 1500); + } else { + call.write(value); + call.resume(); + } + }); + call.on('end', () => { + if (lastMessage === null) { + call.emit('error', {code: grpc.status.INVALID_ARGUMENT, details: 'request message not received'}); + } + call.end(); + }); +} + +const serviceImplementation = { + unaryEcho, + bidirectionalStreamingEcho +} + +function main() { + const argv = parseArgs(process.argv.slice(2), { + string: 'port', + default: {port: '50052'} + }); + const server = new grpc.Server(); + server.addService(echoProto.Echo.service, serviceImplementation); + server.bindAsync(`0.0.0.0:${argv.port}`, grpc.ServerCredentials.createInsecure(), () => { + server.start(); + }); + client = new echoProto.Echo(`localhost:${argv.port}`, grpc.credentials.createInsecure()); +} + +main(); diff --git a/examples/error_handling/README.md b/examples/error_handling/README.md new file mode 100644 index 000000000..c1ba71d68 --- /dev/null +++ b/examples/error_handling/README.md @@ -0,0 +1,23 @@ +# Error Handling + +This example demonstrates basic RPC error handling in gRPC for unary and +streaming response cardinalities. + +## Start the server + +Run the server, which returns an error if the RPC request's `name` field is +empty. + +``` +node server.js +``` + +## Run the client + +Then run the client in another terminal, which makes two requests for each of +unary and streaming responses: one with an empty `name` field and one with it +populated with the current username reported by `os.userInfo()`. + +``` +node client.js +``` diff --git a/examples/error_handling/client.js b/examples/error_handling/client.js new file mode 100644 index 000000000..1a8eff8ea --- /dev/null +++ b/examples/error_handling/client.js @@ -0,0 +1,89 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +const grpc = require('@grpc/grpc-js'); +const protoLoader = require('@grpc/proto-loader'); +const parseArgs = require('minimist'); +const os = require('os'); + +const PROTO_PATH = __dirname + '/../protos/helloworld.proto'; + +const packageDefinition = protoLoader.loadSync( + PROTO_PATH, + {keepCase: true, + longs: String, + enums: String, + defaults: true, + oneofs: true + }); +const helloProto = grpc.loadPackageDefinition(packageDefinition).helloworld; + +function unaryCall(client, requestId, name, expectedCode) { + console.log(`[${requestId}] Calling SayHello with name:"${name}"`); + return new Promise((resolve, reject) => { + client.sayHello({name: name}, (error, value) => { + if (error) { + if (error.code === expectedCode) { + console.log(`[${requestId}] Received error ${error.message}`); + } else { + console.log(`[${requestId}] Received unexpected error ${error.message}`); + } + } + if (value) { + console.log(`[${requestId}] Received response ${value.message}`); + } + resolve(); + }); + }); +} + +function streamingCall(client, requestId, name, expectedCode) { + console.log(`[${requestId}] Calling SayHelloStreamReply with name:"${name}"`); + return new Promise((resolve, reject) => { + const call = client.sayHelloStreamReply({name: name}); + call.on('data', value => { + console.log(`[${requestId}] Received response ${value.message}`); + }); + call.on('status', status => { + console.log(`[${requestId}] Received status with code=${grpc.status[status.code]} details=${status.details}`); + resolve(); + }); + call.on('error', error => { + if (error.code === expectedCode) { + console.log(`[${requestId}] Received expected error ${error.message}`); + } else { + console.log(`[${requestId}] Received unexpected error ${error.message}`); + } + }); + }); +} + +async function main() { + let argv = parseArgs(process.argv.slice(2), { + string: 'target', + default: {target: 'localhost:50052'} + }); + const client = new helloProto.Greeter(argv.target, grpc.credentials.createInsecure()); + const name = os.userInfo().username ?? 'unknown'; + await unaryCall(client, 1, '', grpc.status.INVALID_ARGUMENT); + await unaryCall(client, 2, name, grpc.status.OK); + await streamingCall(client, 3, '', grpc.status.INVALID_ARGUMENT); + await streamingCall(client, 4, name, grpc.status.OK); +} + +main(); diff --git a/examples/error_handling/server.js b/examples/error_handling/server.js new file mode 100644 index 000000000..e77701848 --- /dev/null +++ b/examples/error_handling/server.js @@ -0,0 +1,68 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +var PROTO_PATH = __dirname + '/../protos/helloworld.proto'; + +var grpc = require('@grpc/grpc-js'); +var protoLoader = require('@grpc/proto-loader'); +var packageDefinition = protoLoader.loadSync( + PROTO_PATH, + {keepCase: true, + longs: String, + enums: String, + defaults: true, + oneofs: true + }); +var hello_proto = grpc.loadPackageDefinition(packageDefinition).helloworld; + +/** + * Implements the SayHello RPC method. + */ +function sayHello(call, callback) { + if (call.request.name === '') { + callback({code: grpc.status.INVALID_ARGUMENT, details: 'request missing required field: name'}); + return; + } + callback(null, {message: 'Hello ' + call.request.name}); +} + +const REPLY_COUNT = 5; + +function sayHelloStreamReply(call) { + if (call.request.name === '') { + call.emit('error', {code: grpc.status.INVALID_ARGUMENT, details: 'request missing required field: name'}); + } else { + for (let i = 0; i < REPLY_COUNT; i++) { + call.write({message: 'Hello ' + call.request.name}); + } + call.end(); + } +} + +/** + * Starts an RPC server that receives requests for the Greeter service at the + * sample server port + */ +function main() { + var server = new grpc.Server(); + server.addService(hello_proto.Greeter.service, {sayHello: sayHello, sayHelloStreamReply: sayHelloStreamReply}); + server.bindAsync('0.0.0.0:50052', grpc.ServerCredentials.createInsecure(), () => { + server.start(); + }); +} + +main(); diff --git a/examples/helloworld/dynamic_codegen/greeter_client.js b/examples/helloworld/dynamic_codegen/greeter_client.js new file mode 100644 index 000000000..17984893f --- /dev/null +++ b/examples/helloworld/dynamic_codegen/greeter_client.js @@ -0,0 +1,57 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +var PROTO_PATH = __dirname + '/../../protos/helloworld.proto'; + +var parseArgs = require('minimist'); +var grpc = require('@grpc/grpc-js'); +var protoLoader = require('@grpc/proto-loader'); +var packageDefinition = protoLoader.loadSync( + PROTO_PATH, + {keepCase: true, + longs: String, + enums: String, + defaults: true, + oneofs: true + }); +var hello_proto = grpc.loadPackageDefinition(packageDefinition).helloworld; + +function main() { + var argv = parseArgs(process.argv.slice(2), { + string: 'target' + }); + var target; + if (argv.target) { + target = argv.target; + } else { + target = 'localhost:50051'; + } + var client = new hello_proto.Greeter(target, + grpc.credentials.createInsecure()); + var user; + if (argv._.length > 0) { + user = argv._[0]; + } else { + user = 'world'; + } + client.sayHello({name: user}, function(err, response) { + console.log('Greeting:', response.message); + }); +} + +main(); diff --git a/examples/helloworld/dynamic_codegen/greeter_server.js b/examples/helloworld/dynamic_codegen/greeter_server.js new file mode 100644 index 000000000..c606cd8cc --- /dev/null +++ b/examples/helloworld/dynamic_codegen/greeter_server.js @@ -0,0 +1,52 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +var PROTO_PATH = __dirname + '/../../protos/helloworld.proto'; + +var grpc = require('@grpc/grpc-js'); +var protoLoader = require('@grpc/proto-loader'); +var packageDefinition = protoLoader.loadSync( + PROTO_PATH, + {keepCase: true, + longs: String, + enums: String, + defaults: true, + oneofs: true + }); +var hello_proto = grpc.loadPackageDefinition(packageDefinition).helloworld; + +/** + * Implements the SayHello RPC method. + */ +function sayHello(call, callback) { + callback(null, {message: 'Hello ' + call.request.name}); +} + +/** + * Starts an RPC server that receives requests for the Greeter service at the + * sample server port + */ +function main() { + var server = new grpc.Server(); + server.addService(hello_proto.Greeter.service, {sayHello: sayHello}); + server.bindAsync('0.0.0.0:50051', grpc.ServerCredentials.createInsecure(), () => { + server.start(); + }); +} + +main(); diff --git a/examples/helloworld/static_codegen/README.md b/examples/helloworld/static_codegen/README.md new file mode 100644 index 000000000..201ffb58e --- /dev/null +++ b/examples/helloworld/static_codegen/README.md @@ -0,0 +1,7 @@ +This is the static code generation variant of the Hello World. Code in these examples is pre-generated using protoc and the Node gRPC protoc plugin, and the generated code can be found in various `*_pb.js` files. 
The command line sequence for generating those files is as follows (assuming that `protoc` and `grpc_node_plugin` are present, and starting in the directory which contains this README.md file): + +```sh +cd ../protos +npm install -g grpc-tools +grpc_tools_node_protoc --js_out=import_style=commonjs,binary:../helloworld/static_codegen/ --grpc_out=grpc_js:../helloworld/static_codegen/ helloworld.proto +``` diff --git a/examples/helloworld/static_codegen/greeter_client.js b/examples/helloworld/static_codegen/greeter_client.js new file mode 100644 index 000000000..668a3f8e4 --- /dev/null +++ b/examples/helloworld/static_codegen/greeter_client.js @@ -0,0 +1,50 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +var parseArgs = require('minimist'); +var messages = require('./helloworld_pb'); +var services = require('./helloworld_grpc_pb'); + +var grpc = require('@grpc/grpc-js'); + +function main() { + var argv = parseArgs(process.argv.slice(2), { + string: 'target' + }); + var target; + if (argv.target) { + target = argv.target; + } else { + target = 'localhost:50051'; + } + var client = new services.GreeterClient(target, + grpc.credentials.createInsecure()); + var request = new messages.HelloRequest(); + var user; + if (argv._.length > 0) { + user = argv._[0]; + } else { + user = 'world'; + } + request.setName(user); + client.sayHello(request, function(err, response) { + console.log('Greeting:', response.getMessage()); + }); +} + +main(); diff --git a/examples/helloworld/static_codegen/greeter_server.js b/examples/helloworld/static_codegen/greeter_server.js new file mode 100644 index 000000000..7a3e87d80 --- /dev/null +++ b/examples/helloworld/static_codegen/greeter_server.js @@ -0,0 +1,45 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +var messages = require('./helloworld_pb'); +var services = require('./helloworld_grpc_pb'); + +var grpc = require('@grpc/grpc-js'); + +/** + * Implements the SayHello RPC method. 
+ */ +function sayHello(call, callback) { + var reply = new messages.HelloReply(); + reply.setMessage('Hello ' + call.request.getName()); + callback(null, reply); +} + +/** + * Starts an RPC server that receives requests for the Greeter service at the + * sample server port + */ +function main() { + var server = new grpc.Server(); + server.addService(services.GreeterService, {sayHello: sayHello}); + server.bindAsync('0.0.0.0:50051', grpc.ServerCredentials.createInsecure(), () => { + server.start(); + }); +} + +main(); diff --git a/examples/helloworld/static_codegen/helloworld_grpc_pb.js b/examples/helloworld/static_codegen/helloworld_grpc_pb.js new file mode 100644 index 000000000..85dc0f0b7 --- /dev/null +++ b/examples/helloworld/static_codegen/helloworld_grpc_pb.js @@ -0,0 +1,61 @@ +// GENERATED CODE -- DO NOT EDIT! + +// Original file comments: +// Copyright 2015 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +'use strict'; +var grpc = require('@grpc/grpc-js'); +var helloworld_pb = require('./helloworld_pb.js'); + +function serialize_helloworld_HelloReply(arg) { + if (!(arg instanceof helloworld_pb.HelloReply)) { + throw new Error('Expected argument of type helloworld.HelloReply'); + } + return Buffer.from(arg.serializeBinary()); +} + +function deserialize_helloworld_HelloReply(buffer_arg) { + return helloworld_pb.HelloReply.deserializeBinary(new Uint8Array(buffer_arg)); +} + +function serialize_helloworld_HelloRequest(arg) { + if (!(arg instanceof helloworld_pb.HelloRequest)) { + throw new Error('Expected argument of type helloworld.HelloRequest'); + } + return Buffer.from(arg.serializeBinary()); +} + +function deserialize_helloworld_HelloRequest(buffer_arg) { + return helloworld_pb.HelloRequest.deserializeBinary(new Uint8Array(buffer_arg)); +} + + +// The greeting service definition. +var GreeterService = exports.GreeterService = { + // Sends a greeting +sayHello: { + path: '/helloworld.Greeter/SayHello', + requestStream: false, + responseStream: false, + requestType: helloworld_pb.HelloRequest, + responseType: helloworld_pb.HelloReply, + requestSerialize: serialize_helloworld_HelloRequest, + requestDeserialize: deserialize_helloworld_HelloRequest, + responseSerialize: serialize_helloworld_HelloReply, + responseDeserialize: deserialize_helloworld_HelloReply, + }, +}; + +exports.GreeterClient = grpc.makeGenericClientConstructor(GreeterService); diff --git a/examples/helloworld/static_codegen/helloworld_pb.js b/examples/helloworld/static_codegen/helloworld_pb.js new file mode 100644 index 000000000..e67680281 --- /dev/null +++ b/examples/helloworld/static_codegen/helloworld_pb.js @@ -0,0 +1,319 @@ +// source: helloworld.proto +/** + * @fileoverview + * @enhanceable + * @suppress {messageConventions} JS Compiler reports an error if a variable or + * field starts with 'MSG_' and isn't a translatable message. + * @public + */ +// GENERATED CODE -- DO NOT EDIT! 
+ +var jspb = require('google-protobuf'); +var goog = jspb; +var global = Function('return this')(); + +goog.exportSymbol('proto.helloworld.HelloReply', null, global); +goog.exportSymbol('proto.helloworld.HelloRequest', null, global); +/** + * Generated by JsPbCodeGenerator. + * @param {Array=} opt_data Optional initial data array, typically from a + * server response, or constructed directly in Javascript. The array is used + * in place and becomes part of the constructed object. It is not cloned. + * If no data is provided, the constructed object will be empty, but still + * valid. + * @extends {jspb.Message} + * @constructor + */ +proto.helloworld.HelloRequest = function(opt_data) { + jspb.Message.initialize(this, opt_data, 0, -1, null, null); +}; +goog.inherits(proto.helloworld.HelloRequest, jspb.Message); +if (goog.DEBUG && !COMPILED) { + /** + * @public + * @override + */ + proto.helloworld.HelloRequest.displayName = 'proto.helloworld.HelloRequest'; +} +/** + * Generated by JsPbCodeGenerator. + * @param {Array=} opt_data Optional initial data array, typically from a + * server response, or constructed directly in Javascript. The array is used + * in place and becomes part of the constructed object. It is not cloned. + * If no data is provided, the constructed object will be empty, but still + * valid. + * @extends {jspb.Message} + * @constructor + */ +proto.helloworld.HelloReply = function(opt_data) { + jspb.Message.initialize(this, opt_data, 0, -1, null, null); +}; +goog.inherits(proto.helloworld.HelloReply, jspb.Message); +if (goog.DEBUG && !COMPILED) { + /** + * @public + * @override + */ + proto.helloworld.HelloReply.displayName = 'proto.helloworld.HelloReply'; +} + + + +if (jspb.Message.GENERATE_TO_OBJECT) { +/** + * Creates an object representation of this proto. + * Field names that are reserved in JavaScript and will be renamed to pb_name. + * Optional fields that are not set will be set to undefined. + * To access a reserved field use, foo.pb_, eg, foo.pb_default. + * For the list of reserved names please see: + * net/proto2/compiler/js/internal/generator.cc#kKeyword. + * @param {boolean=} opt_includeInstance Deprecated. whether to include the + * JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @return {!Object} + */ +proto.helloworld.HelloRequest.prototype.toObject = function(opt_includeInstance) { + return proto.helloworld.HelloRequest.toObject(opt_includeInstance, this); +}; + + +/** + * Static version of the {@see toObject} method. + * @param {boolean|undefined} includeInstance Deprecated. Whether to include + * the JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @param {!proto.helloworld.HelloRequest} msg The msg instance to transform. + * @return {!Object} + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.helloworld.HelloRequest.toObject = function(includeInstance, msg) { + var f, obj = { + name: jspb.Message.getFieldWithDefault(msg, 1, "") + }; + + if (includeInstance) { + obj.$jspbMessageInstance = msg; + } + return obj; +}; +} + + +/** + * Deserializes binary data (in protobuf wire format). + * @param {jspb.ByteSource} bytes The bytes to deserialize. 
+ * @return {!proto.helloworld.HelloRequest} + */ +proto.helloworld.HelloRequest.deserializeBinary = function(bytes) { + var reader = new jspb.BinaryReader(bytes); + var msg = new proto.helloworld.HelloRequest; + return proto.helloworld.HelloRequest.deserializeBinaryFromReader(msg, reader); +}; + + +/** + * Deserializes binary data (in protobuf wire format) from the + * given reader into the given message object. + * @param {!proto.helloworld.HelloRequest} msg The message object to deserialize into. + * @param {!jspb.BinaryReader} reader The BinaryReader to use. + * @return {!proto.helloworld.HelloRequest} + */ +proto.helloworld.HelloRequest.deserializeBinaryFromReader = function(msg, reader) { + while (reader.nextField()) { + if (reader.isEndGroup()) { + break; + } + var field = reader.getFieldNumber(); + switch (field) { + case 1: + var value = /** @type {string} */ (reader.readString()); + msg.setName(value); + break; + default: + reader.skipField(); + break; + } + } + return msg; +}; + + +/** + * Serializes the message to binary data (in protobuf wire format). + * @return {!Uint8Array} + */ +proto.helloworld.HelloRequest.prototype.serializeBinary = function() { + var writer = new jspb.BinaryWriter(); + proto.helloworld.HelloRequest.serializeBinaryToWriter(this, writer); + return writer.getResultBuffer(); +}; + + +/** + * Serializes the given message to binary data (in protobuf wire + * format), writing to the given BinaryWriter. + * @param {!proto.helloworld.HelloRequest} message + * @param {!jspb.BinaryWriter} writer + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.helloworld.HelloRequest.serializeBinaryToWriter = function(message, writer) { + var f = undefined; + f = message.getName(); + if (f.length > 0) { + writer.writeString( + 1, + f + ); + } +}; + + +/** + * optional string name = 1; + * @return {string} + */ +proto.helloworld.HelloRequest.prototype.getName = function() { + return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 1, "")); +}; + + +/** + * @param {string} value + * @return {!proto.helloworld.HelloRequest} returns this + */ +proto.helloworld.HelloRequest.prototype.setName = function(value) { + return jspb.Message.setProto3StringField(this, 1, value); +}; + + + + + +if (jspb.Message.GENERATE_TO_OBJECT) { +/** + * Creates an object representation of this proto. + * Field names that are reserved in JavaScript and will be renamed to pb_name. + * Optional fields that are not set will be set to undefined. + * To access a reserved field use, foo.pb_, eg, foo.pb_default. + * For the list of reserved names please see: + * net/proto2/compiler/js/internal/generator.cc#kKeyword. + * @param {boolean=} opt_includeInstance Deprecated. whether to include the + * JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @return {!Object} + */ +proto.helloworld.HelloReply.prototype.toObject = function(opt_includeInstance) { + return proto.helloworld.HelloReply.toObject(opt_includeInstance, this); +}; + + +/** + * Static version of the {@see toObject} method. + * @param {boolean|undefined} includeInstance Deprecated. Whether to include + * the JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @param {!proto.helloworld.HelloReply} msg The msg instance to transform. 
+ * @return {!Object} + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.helloworld.HelloReply.toObject = function(includeInstance, msg) { + var f, obj = { + message: jspb.Message.getFieldWithDefault(msg, 1, "") + }; + + if (includeInstance) { + obj.$jspbMessageInstance = msg; + } + return obj; +}; +} + + +/** + * Deserializes binary data (in protobuf wire format). + * @param {jspb.ByteSource} bytes The bytes to deserialize. + * @return {!proto.helloworld.HelloReply} + */ +proto.helloworld.HelloReply.deserializeBinary = function(bytes) { + var reader = new jspb.BinaryReader(bytes); + var msg = new proto.helloworld.HelloReply; + return proto.helloworld.HelloReply.deserializeBinaryFromReader(msg, reader); +}; + + +/** + * Deserializes binary data (in protobuf wire format) from the + * given reader into the given message object. + * @param {!proto.helloworld.HelloReply} msg The message object to deserialize into. + * @param {!jspb.BinaryReader} reader The BinaryReader to use. + * @return {!proto.helloworld.HelloReply} + */ +proto.helloworld.HelloReply.deserializeBinaryFromReader = function(msg, reader) { + while (reader.nextField()) { + if (reader.isEndGroup()) { + break; + } + var field = reader.getFieldNumber(); + switch (field) { + case 1: + var value = /** @type {string} */ (reader.readString()); + msg.setMessage(value); + break; + default: + reader.skipField(); + break; + } + } + return msg; +}; + + +/** + * Serializes the message to binary data (in protobuf wire format). + * @return {!Uint8Array} + */ +proto.helloworld.HelloReply.prototype.serializeBinary = function() { + var writer = new jspb.BinaryWriter(); + proto.helloworld.HelloReply.serializeBinaryToWriter(this, writer); + return writer.getResultBuffer(); +}; + + +/** + * Serializes the given message to binary data (in protobuf wire + * format), writing to the given BinaryWriter. + * @param {!proto.helloworld.HelloReply} message + * @param {!jspb.BinaryWriter} writer + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.helloworld.HelloReply.serializeBinaryToWriter = function(message, writer) { + var f = undefined; + f = message.getMessage(); + if (f.length > 0) { + writer.writeString( + 1, + f + ); + } +}; + + +/** + * optional string message = 1; + * @return {string} + */ +proto.helloworld.HelloReply.prototype.getMessage = function() { + return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 1, "")); +}; + + +/** + * @param {string} value + * @return {!proto.helloworld.HelloReply} returns this + */ +proto.helloworld.HelloReply.prototype.setMessage = function(value) { + return jspb.Message.setProto3StringField(this, 1, value); +}; + + +goog.object.extend(exports, proto.helloworld); diff --git a/examples/metadata/README.md b/examples/metadata/README.md new file mode 100644 index 000000000..f8b55de2c --- /dev/null +++ b/examples/metadata/README.md @@ -0,0 +1,15 @@ +# Metadata example + +This example shows how to set and read metadata in RPC headers and trailers. + +## Start the server + +``` +node server.js +``` + +## Run the client + +``` +node client.js +``` diff --git a/examples/metadata/client.js b/examples/metadata/client.js new file mode 100644 index 000000000..e8f6f53fa --- /dev/null +++ b/examples/metadata/client.js @@ -0,0 +1,262 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +const grpc = require('@grpc/grpc-js'); +const protoLoader = require('@grpc/proto-loader'); +const parseArgs = require('minimist'); + +const PROTO_PATH = __dirname + '/../protos/echo.proto'; + +const packageDefinition = protoLoader.loadSync( + PROTO_PATH, + {keepCase: true, + longs: String, + enums: String, + defaults: true, + oneofs: true + }); +const echoProto = grpc.loadPackageDefinition(packageDefinition).grpc.examples.echo; + +const STREAMING_COUNT = 10; + +function unaryCallWithMetadata(client, message) { + return new Promise((resolve, reject) => { + console.log('--- unary ---'); + const requestMetadata = new grpc.Metadata(); + requestMetadata.set('timestamp', new Date().toISOString()); + const call = client.unaryEcho({message}, requestMetadata, (error, value) => { + if (error) { + console.log(`Received error ${error}`); + return; + } + console.log('Response:'); + console.log(`- ${JSON.stringify(value)}`); + }); + call.on('metadata', metadata => { + const timestamps = metadata.get('timestamp'); + if (timestamps.length > 0) { + console.log('timestamp from header:'); + for (const [index, value] of timestamps.entries()) { + console.log(` ${index}. ${value}`); + } + } else { + console.error("timestamp expected but doesn't exist in header"); + } + const locations = metadata.get('location'); + if (locations.length > 0) { + console.log('location from header:'); + for (const [index, value] of locations.entries()) { + console.log(` ${index}. ${value}`); + } + } else { + console.error("location expected but doesn't exist in header"); + } + }); + call.on('status', status => { + const timestamps = status.metadata.get('timestamp'); + if (timestamps.length > 0) { + console.log('timestamp from trailer:'); + for (const [index, value] of timestamps.entries()) { + console.log(` ${index}. ${value}`); + } + } else { + console.error("timestamp expected but doesn't exist in trailer"); + } + resolve(); + }); + }); +} + +function serverStreamingWithMetadata(client, message) { + return new Promise((resolve, reject) => { + console.log('--- server streaming ---'); + const requestMetadata = new grpc.Metadata(); + requestMetadata.set('timestamp', new Date().toISOString()); + const call = client.serverStreamingEcho({message}, requestMetadata); + call.on('metadata', metadata => { + const timestamps = metadata.get('timestamp'); + if (timestamps.length > 0) { + console.log('timestamp from header:'); + for (const [index, value] of timestamps.entries()) { + console.log(` ${index}. ${value}`); + } + } else { + console.error("timestamp expected but doesn't exist in header"); + } + const locations = metadata.get('location'); + if (locations.length > 0) { + console.log('location from header:'); + for (const [index, value] of locations.entries()) { + console.log(` ${index}. 
${value}`); + } + } else { + console.error("location expected but doesn't exist in header"); + } + }); + call.on('data', value => { + console.log(`Received response ${JSON.stringify(value)}`); + }); + call.on('status', status => { + const timestamps = status.metadata.get('timestamp'); + if (timestamps.length > 0) { + console.log('timestamp from trailer:'); + for (const [index, value] of timestamps.entries()) { + console.log(` ${index}. ${value}`); + } + } else { + console.error("timestamp expected but doesn't exist in trailer"); + } + resolve(); + }); + call.on('error', error => { + console.log(`Received error ${error}`); + }); + }); +} + +function clientStreamingWithMetadata(client, message) { + return new Promise((resolve, reject) => { + console.log('--- client streaming ---'); + const requestMetadata = new grpc.Metadata(); + requestMetadata.set('timestamp', new Date().toISOString()); + const call = client.clientStreamingEcho(requestMetadata, (error, value) => { + if (error) { + console.log(`Received error ${error}`); + return; + } + console.log('Response:'); + console.log(`- ${JSON.stringify(value)}`); + }); + call.on('metadata', metadata => { + const timestamps = metadata.get('timestamp'); + if (timestamps.length > 0) { + console.log('timestamp from header:'); + for (const [index, value] of timestamps.entries()) { + console.log(` ${index}. ${value}`); + } + } else { + console.error("timestamp expected but doesn't exist in header"); + } + const locations = metadata.get('location'); + if (locations.length > 0) { + console.log('location from header:'); + for (const [index, value] of locations.entries()) { + console.log(` ${index}. ${value}`); + } + } else { + console.error("location expected but doesn't exist in header"); + } + }); + call.on('status', status => { + const timestamps = status.metadata.get('timestamp'); + if (timestamps.length > 0) { + console.log('timestamp from trailer:'); + for (const [index, value] of timestamps.entries()) { + console.log(` ${index}. ${value}`); + } + } else { + console.error("timestamp expected but doesn't exist in trailer"); + } + resolve(); + }); + for (let i = 0; i < STREAMING_COUNT; i++) { + call.write({message}); + } + call.end(); + }); +} + +function bidirectionalWithMetadata(client, message) { + return new Promise((resolve, reject) => { + console.log('--- bidirectional ---'); + const requestMetadata = new grpc.Metadata(); + requestMetadata.set('timestamp', new Date().toISOString()); + const call = client.bidirectionalStreamingEcho(requestMetadata); + call.on('metadata', metadata => { + const timestamps = metadata.get('timestamp'); + if (timestamps.length > 0) { + console.log('timestamp from header:'); + for (const [index, value] of timestamps.entries()) { + console.log(` ${index}. ${value}`); + } + } else { + console.error("timestamp expected but doesn't exist in header"); + } + const locations = metadata.get('location'); + if (locations.length > 0) { + console.log('location from header:'); + for (const [index, value] of locations.entries()) { + console.log(` ${index}. ${value}`); + } + } else { + console.error("location expected but doesn't exist in header"); + } + }); + call.on('data', value => { + console.log(`Received response ${JSON.stringify(value)}`); + }); + call.on('status', status => { + const timestamps = status.metadata.get('timestamp'); + if (timestamps.length > 0) { + console.log('timestamp from trailer:'); + for (const [index, value] of timestamps.entries()) { + console.log(` ${index}. 
${value}`); + } + } else { + console.error("timestamp expected but doesn't exist in trailer"); + } + resolve(); + }); + call.on('error', error => { + console.log(`Received error ${error}`); + }); + for (let i = 0; i < STREAMING_COUNT; i++) { + call.write({message}); + } + call.end(); + }); +} + +function asyncWait(ms) { + return new Promise((resolve, reject) => { + setTimeout(resolve, ms); + }); +} + +const message = 'this is examples/metadata'; + +async function main() { + let argv = parseArgs(process.argv.slice(2), { + string: 'target', + default: {target: 'localhost:50052'} + }); + const client = new echoProto.Echo(argv.target, grpc.credentials.createInsecure()); + await unaryCallWithMetadata(client, message); + await asyncWait(1000); + + await serverStreamingWithMetadata(client, message); + await asyncWait(1000); + + await clientStreamingWithMetadata(client, message); + await asyncWait(1000); + + await bidirectionalWithMetadata(client, message); + client.close(); +} + +main(); diff --git a/examples/metadata/server.js b/examples/metadata/server.js new file mode 100644 index 000000000..b061d20a2 --- /dev/null +++ b/examples/metadata/server.js @@ -0,0 +1,156 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +const grpc = require('@grpc/grpc-js'); +const protoLoader = require('@grpc/proto-loader'); + +const PROTO_PATH = __dirname + '/../protos/echo.proto'; + +const packageDefinition = protoLoader.loadSync( + PROTO_PATH, + {keepCase: true, + longs: String, + enums: String, + defaults: true, + oneofs: true + }); +const echoProto = grpc.loadPackageDefinition(packageDefinition).grpc.examples.echo; + +const STREAMING_COUNT = 10; + +function unaryEcho(call, callback) { + console.log('--- UnaryEcho ---'); + const incomingTimestamps = call.metadata.get('timestamp'); + if (incomingTimestamps.length > 0) { + console.log('Timestamp from metadata:'); + for (const [index, value] of incomingTimestamps.entries()) { + console.log(` ${index}. ${value}`); + } + } + + const outgoingHeaders = new grpc.Metadata(); + outgoingHeaders.set('location', 'MTV'); + outgoingHeaders.set('timestamp', new Date().toISOString()); + call.sendMetadata(outgoingHeaders); + + const outgoingTrailers = new grpc.Metadata(); + outgoingTrailers.set('timestamp', new Date().toISOString()); + + console.log(`Request received ${JSON.stringify(call.request)}, sending echo`); + callback(null, call.request, outgoingTrailers); +} + +function serverStreamingEcho(call) { + console.log('--- ServerStreamingEcho ---'); + const incomingTimestamps = call.metadata.get('timestamp'); + if (incomingTimestamps.length > 0) { + console.log('Timestamp from metadata:'); + for (const [index, value] of incomingTimestamps.entries()) { + console.log(` ${index}. 
${value}`);
+    }
+  }
+
+  const outgoingHeaders = new grpc.Metadata();
+  outgoingHeaders.set('location', 'MTV');
+  outgoingHeaders.set('timestamp', new Date().toISOString());
+  call.sendMetadata(outgoingHeaders);
+
+  console.log(`Request received ${JSON.stringify(call.request)}`);
+  for (let i = 0; i < STREAMING_COUNT; i++) {
+    console.log(`Echo message ${JSON.stringify(call.request)}`);
+    call.write(call.request);
+  }
+
+  const outgoingTrailers = new grpc.Metadata();
+  outgoingTrailers.set('timestamp', new Date().toISOString());
+  call.end(outgoingTrailers);
+}
+
+function clientStreamingEcho(call, callback) {
+  console.log('--- ClientStreamingEcho ---');
+  const incomingTimestamps = call.metadata.get('timestamp');
+  if (incomingTimestamps.length > 0) {
+    console.log('Timestamp from metadata:');
+    for (const [index, value] of incomingTimestamps.entries()) {
+      console.log(` ${index}. ${value}`);
+    }
+  }
+
+  const outgoingHeaders = new grpc.Metadata();
+  outgoingHeaders.set('location', 'MTV');
+  outgoingHeaders.set('timestamp', new Date().toISOString());
+  call.sendMetadata(outgoingHeaders);
+
+  let lastReceivedMessage = '';
+  call.on('data', value => {
+    console.log(`Received request ${JSON.stringify(value)}`);
+    lastReceivedMessage = value.message;
+  });
+  call.on('end', () => {
+    const outgoingTrailers = new grpc.Metadata();
+    outgoingTrailers.set('timestamp', new Date().toISOString());
+    callback(null, {message: lastReceivedMessage}, outgoingTrailers);
+  });
+}
+
+function bidirectionalStreamingEcho(call) {
+  console.log('--- BidirectionalStreamingEcho ---');
+  const incomingTimestamps = call.metadata.get('timestamp');
+  if (incomingTimestamps.length > 0) {
+    console.log('Timestamp from metadata:');
+    for (const [index, value] of incomingTimestamps.entries()) {
+      console.log(` ${index}. ${value}`);
+    }
+  }
+
+  const outgoingHeaders = new grpc.Metadata();
+  outgoingHeaders.set('location', 'MTV');
+  outgoingHeaders.set('timestamp', new Date().toISOString());
+  call.sendMetadata(outgoingHeaders);
+
+  call.on('data', value => {
+    console.log(`Request received ${JSON.stringify(value)}, sending echo`);
+    call.write(value);
+  });
+  call.on('end', () => {
+    const outgoingTrailers = new grpc.Metadata();
+    outgoingTrailers.set('timestamp', new Date().toISOString());
+    call.end(outgoingTrailers);
+  });
+}
+
+const serviceImplementation = {
+  unaryEcho,
+  serverStreamingEcho,
+  clientStreamingEcho,
+  bidirectionalStreamingEcho
+};
+
+const parseArgs = require('minimist');
+
+function main() {
+  const argv = parseArgs(process.argv.slice(2), {
+    string: 'port',
+    default: {port: '50052'}
+  });
+  const server = new grpc.Server();
+  server.addService(echoProto.Echo.service, serviceImplementation);
+  server.bindAsync(`0.0.0.0:${argv.port}`, grpc.ServerCredentials.createInsecure(), () => {
+    server.start();
+  });
+}
+
+main();
diff --git a/examples/package.json b/examples/package.json
new file mode 100644
index 000000000..6857aa5d9
--- /dev/null
+++ b/examples/package.json
@@ -0,0 +1,14 @@
+{
+  "name": "grpc-examples",
+  "version": "0.1.0",
+  "dependencies": {
+    "@grpc/proto-loader": "^0.6.0",
+    "async": "^1.5.2",
+    "google-protobuf": "^3.0.0",
+    "@grpc/grpc-js": "^1.8.0",
+    "@grpc/grpc-js-xds": "^1.8.0",
+    "@grpc/reflection": "^1.0.0",
+    "lodash": "^4.6.1",
+    "minimist": "^1.2.0"
+  }
+}
diff --git a/examples/protos/BUILD b/examples/protos/BUILD
new file mode 100644
index 000000000..929cad93a
--- /dev/null
+++ b/examples/protos/BUILD
@@ -0,0 +1,82 @@
+# Copyright 2020 the gRPC authors.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +load("@rules_proto//proto:defs.bzl", "proto_library") +load("//bazel:cc_grpc_library.bzl", "cc_grpc_library") +load("//bazel:grpc_build_system.bzl", "grpc_proto_library") +load("//bazel:python_rules.bzl", "py_grpc_library", "py_proto_library") + +licenses(["notice"]) + +package(default_visibility = ["//visibility:public"]) + +grpc_proto_library( + name = "auth_sample", + srcs = ["auth_sample.proto"], +) + +grpc_proto_library( + name = "hellostreamingworld", + srcs = ["hellostreamingworld.proto"], +) + +# The following three rules demonstrate the usage of the cc_grpc_library rule in +# in a mode compatible with the native proto_library and cc_proto_library rules. +proto_library( + name = "helloworld_proto", + srcs = ["helloworld.proto"], +) + +cc_proto_library( + name = "helloworld_cc_proto", + deps = [":helloworld_proto"], +) + +cc_grpc_library( + name = "helloworld_cc_grpc", + srcs = [":helloworld_proto"], + grpc_only = True, + deps = [":helloworld_cc_proto"], +) + +grpc_proto_library( + name = "route_guide", + srcs = ["route_guide.proto"], +) + +proto_library( + name = "keyvaluestore_proto", + srcs = ["keyvaluestore.proto"], +) + +grpc_proto_library( + name = "keyvaluestore", + srcs = ["keyvaluestore.proto"], +) + +py_proto_library( + name = "helloworld_py_pb2", + deps = [":helloworld_proto"], +) + +py_grpc_library( + name = "helloworld_py_pb2_grpc", + srcs = [":helloworld_proto"], + deps = [":helloworld_py_pb2"], +) + +proto_library( + name = "route_guide_proto", + srcs = [":route_guide.proto"], +) diff --git a/examples/protos/README.md b/examples/protos/README.md new file mode 100644 index 000000000..48df7c894 --- /dev/null +++ b/examples/protos/README.md @@ -0,0 +1,8 @@ +# Example protos + +## Contents + +- [helloworld.proto] + - The simple example used in the overview. +- [route_guide.proto] + - An example service described in detail in the tutorial. diff --git a/examples/protos/auth_sample.proto b/examples/protos/auth_sample.proto new file mode 100644 index 000000000..7e63602f0 --- /dev/null +++ b/examples/protos/auth_sample.proto @@ -0,0 +1,42 @@ +// Copyright 2015 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package grpc.testing; + +option objc_class_prefix = "AUTH"; + +// Unary request. +message Request { + // Whether Response should include username. + bool fill_username = 4; + + // Whether Response should include OAuth scope. 
+ bool fill_oauth_scope = 5; +} + +// Unary response, as configured by the request. +message Response { + // The user the request came from, for verifying authentication was + // successful. + string username = 2; + // OAuth scope. + string oauth_scope = 3; +} + +service TestService { + // One request followed by one response. + rpc UnaryCall(Request) returns (Response); +} diff --git a/examples/protos/echo.proto b/examples/protos/echo.proto new file mode 100644 index 000000000..2dde5633e --- /dev/null +++ b/examples/protos/echo.proto @@ -0,0 +1,45 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +syntax = "proto3"; + +option go_package = "google.golang.org/grpc/examples/features/proto/echo"; + +package grpc.examples.echo; + +// EchoRequest is the request for echo. +message EchoRequest { + string message = 1; +} + +// EchoResponse is the response for echo. +message EchoResponse { + string message = 1; +} + +// Echo is the echo service. +service Echo { + // UnaryEcho is unary echo. + rpc UnaryEcho(EchoRequest) returns (EchoResponse) {} + // ServerStreamingEcho is server side streaming. + rpc ServerStreamingEcho(EchoRequest) returns (stream EchoResponse) {} + // ClientStreamingEcho is client side streaming. + rpc ClientStreamingEcho(stream EchoRequest) returns (EchoResponse) {} + // BidirectionalStreamingEcho is bidi streaming. + rpc BidirectionalStreamingEcho(stream EchoRequest) returns (stream EchoResponse) {} +} diff --git a/examples/protos/hellostreamingworld.proto b/examples/protos/hellostreamingworld.proto new file mode 100644 index 000000000..8a322bd61 --- /dev/null +++ b/examples/protos/hellostreamingworld.proto @@ -0,0 +1,39 @@ +// Copyright 2015 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +option java_package = "ex.grpc"; +option objc_class_prefix = "HSW"; + +package hellostreamingworld; + +// The greeting service definition. +service MultiGreeter { + // Sends multiple greetings + rpc sayHello (HelloRequest) returns (stream HelloReply) {} +} + +// The request message containing the user's name and how many greetings +// they want. 
+message HelloRequest { + string name = 1; + string num_greetings = 2; +} + +// A response message containing a greeting +message HelloReply { + string message = 1; +} + diff --git a/examples/protos/helloworld.proto b/examples/protos/helloworld.proto new file mode 100644 index 000000000..7e50d0fc7 --- /dev/null +++ b/examples/protos/helloworld.proto @@ -0,0 +1,40 @@ +// Copyright 2015 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +option java_multiple_files = true; +option java_package = "io.grpc.examples.helloworld"; +option java_outer_classname = "HelloWorldProto"; +option objc_class_prefix = "HLW"; + +package helloworld; + +// The greeting service definition. +service Greeter { + // Sends a greeting + rpc SayHello (HelloRequest) returns (HelloReply) {} + + rpc SayHelloStreamReply (HelloRequest) returns (stream HelloReply) {} +} + +// The request message containing the user's name. +message HelloRequest { + string name = 1; +} + +// The response message containing the greetings +message HelloReply { + string message = 1; +} diff --git a/examples/protos/keyvaluestore.proto b/examples/protos/keyvaluestore.proto new file mode 100644 index 000000000..74ad57e02 --- /dev/null +++ b/examples/protos/keyvaluestore.proto @@ -0,0 +1,33 @@ +// Copyright 2018 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package keyvaluestore; + +// A simple key-value storage service +service KeyValueStore { + // Provides a value for each key request + rpc GetValues (stream Request) returns (stream Response) {} +} + +// The request message containing the key +message Request { + string key = 1; +} + +// The response message containing the value associated with the key +message Response { + string value = 1; +} diff --git a/examples/protos/route_guide.proto b/examples/protos/route_guide.proto new file mode 100644 index 000000000..b519f5582 --- /dev/null +++ b/examples/protos/route_guide.proto @@ -0,0 +1,111 @@ +// Copyright 2015 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +option java_multiple_files = true; +option java_package = "io.grpc.examples.routeguide"; +option java_outer_classname = "RouteGuideProto"; +option objc_class_prefix = "RTG"; + +package routeguide; + +// Interface exported by the server. +service RouteGuide { + // A simple RPC. + // + // Obtains the feature at a given position. + // + // A feature with an empty name is returned if there's no feature at the given + // position. + rpc GetFeature(Point) returns (Feature) {} + + // A server-to-client streaming RPC. + // + // Obtains the Features available within the given Rectangle. Results are + // streamed rather than returned at once (e.g. in a response message with a + // repeated field), as the rectangle may cover a large area and contain a + // huge number of features. + rpc ListFeatures(Rectangle) returns (stream Feature) {} + + // A client-to-server streaming RPC. + // + // Accepts a stream of Points on a route being traversed, returning a + // RouteSummary when traversal is completed. + rpc RecordRoute(stream Point) returns (RouteSummary) {} + + // A Bidirectional streaming RPC. + // + // Accepts a stream of RouteNotes sent while a route is being traversed, + // while receiving other RouteNotes (e.g. from other users). + rpc RouteChat(stream RouteNote) returns (stream RouteNote) {} +} + +// Points are represented as latitude-longitude pairs in the E7 representation +// (degrees multiplied by 10**7 and rounded to the nearest integer). +// Latitudes should be in the range +/- 90 degrees and longitude should be in +// the range +/- 180 degrees (inclusive). +message Point { + int32 latitude = 1; + int32 longitude = 2; +} + +// A latitude-longitude rectangle, represented as two diagonally opposite +// points "lo" and "hi". +message Rectangle { + // One corner of the rectangle. + Point lo = 1; + + // The other corner of the rectangle. + Point hi = 2; +} + +// A feature names something at a given point. +// +// If a feature could not be named, the name is empty. +message Feature { + // The name of the feature. + string name = 1; + + // The point where the feature is detected. + Point location = 2; +} + +// A RouteNote is a message sent while at a given point. +message RouteNote { + // The location from which the message is sent. + Point location = 1; + + // The message to be sent. + string message = 2; +} + +// A RouteSummary is received in response to a RecordRoute rpc. +// +// It contains the number of individual points received, the number of +// detected features, and the total distance covered as the cumulative sum of +// the distance between each point. +message RouteSummary { + // The number of points received. + int32 point_count = 1; + + // The number of known features passed while traversing the route. + int32 feature_count = 2; + + // The distance covered in metres. + int32 distance = 3; + + // The duration of the traversal in seconds. 
+ int32 elapsed_time = 4; +} diff --git a/examples/reflection/server.js b/examples/reflection/server.js new file mode 100644 index 000000000..83232e8e5 --- /dev/null +++ b/examples/reflection/server.js @@ -0,0 +1,20 @@ +var path = require('path'); +var grpc = require('@grpc/grpc-js'); +var protoLoader = require('@grpc/proto-loader'); +var reflection = require('@grpc/reflection'); + +var PROTO_PATH = path.join(__dirname, '../protos/helloworld.proto'); + +var server = new grpc.Server(); +var packageDefinition = protoLoader.loadSync(PROTO_PATH); +var proto = grpc.loadPackageDefinition(packageDefinition); +var reflection = new reflection.ReflectionService(packageDefinition); + +reflection.addToServer(server); +server.addService(proto.helloworld.Greeter.service, { + sayHello: (call, callback) => { callback(null, { message: 'Hello' }) } +}); + +server.bindAsync('localhost:5000', grpc.ServerCredentials.createInsecure(), () => { + server.start(); +}); diff --git a/examples/routeguide/README.md b/examples/routeguide/README.md new file mode 100644 index 000000000..fcd147054 --- /dev/null +++ b/examples/routeguide/README.md @@ -0,0 +1,5 @@ +# gRPC Basics: Node.js sample code + +The files in this folder are the samples used in [gRPC Basics: Node.js][], a detailed tutorial for using gRPC in Node.js. + +[gRPC Basics: Node.js]:https://grpc.io/docs/languages/node/basics diff --git a/examples/routeguide/dynamic_codegen/route_guide_client.js b/examples/routeguide/dynamic_codegen/route_guide_client.js new file mode 100644 index 000000000..781464e6d --- /dev/null +++ b/examples/routeguide/dynamic_codegen/route_guide_client.js @@ -0,0 +1,237 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +var PROTO_PATH = __dirname + '/../../protos/route_guide.proto'; + +var async = require('async'); +var fs = require('fs'); +var parseArgs = require('minimist'); +var path = require('path'); +var _ = require('lodash'); +var grpc = require('@grpc/grpc-js'); +var protoLoader = require('@grpc/proto-loader'); +var packageDefinition = protoLoader.loadSync( + PROTO_PATH, + {keepCase: true, + longs: String, + enums: String, + defaults: true, + oneofs: true + }); +var routeguide = grpc.loadPackageDefinition(packageDefinition).routeguide; +var client = new routeguide.RouteGuide('localhost:50051', + grpc.credentials.createInsecure()); + +var COORD_FACTOR = 1e7; + +/** + * Run the getFeature demo. Calls getFeature with a point known to have a + * feature and a point known not to have a feature. 
+ * @param {function} callback Called when this demo is complete + */ +function runGetFeature(callback) { + var next = _.after(2, callback); + function featureCallback(error, feature) { + if (error) { + callback(error); + return; + } + if (feature.name === '') { + console.log('Found no feature at ' + + feature.location.latitude/COORD_FACTOR + ', ' + + feature.location.longitude/COORD_FACTOR); + } else { + console.log('Found feature called "' + feature.name + '" at ' + + feature.location.latitude/COORD_FACTOR + ', ' + + feature.location.longitude/COORD_FACTOR); + } + next(); + } + var point1 = { + latitude: 409146138, + longitude: -746188906 + }; + var point2 = { + latitude: 0, + longitude: 0 + }; + client.getFeature(point1, featureCallback); + client.getFeature(point2, featureCallback); +} + +/** + * Run the listFeatures demo. Calls listFeatures with a rectangle containing all + * of the features in the pre-generated database. Prints each response as it + * comes in. + * @param {function} callback Called when this demo is complete + */ +function runListFeatures(callback) { + var rectangle = { + lo: { + latitude: 400000000, + longitude: -750000000 + }, + hi: { + latitude: 420000000, + longitude: -730000000 + } + }; + console.log('Looking for features between 40, -75 and 42, -73'); + var call = client.listFeatures(rectangle); + call.on('data', function(feature) { + console.log('Found feature called "' + feature.name + '" at ' + + feature.location.latitude/COORD_FACTOR + ', ' + + feature.location.longitude/COORD_FACTOR); + }); + call.on('end', callback); +} + +/** + * Run the recordRoute demo. Sends several randomly chosen points from the + * pre-generated feature database with a variable delay in between. Prints the + * statistics when they are sent from the server. + * @param {function} callback Called when this demo is complete + */ +function runRecordRoute(callback) { + var argv = parseArgs(process.argv, { + string: 'db_path' + }); + fs.readFile(path.resolve(argv.db_path), function(err, data) { + if (err) { + callback(err); + return; + } + var feature_list = JSON.parse(data); + + var num_points = 10; + var call = client.recordRoute(function(error, stats) { + if (error) { + callback(error); + return; + } + console.log('Finished trip with', stats.point_count, 'points'); + console.log('Passed', stats.feature_count, 'features'); + console.log('Travelled', stats.distance, 'meters'); + console.log('It took', stats.elapsed_time, 'seconds'); + callback(); + }); + /** + * Constructs a function that asynchronously sends the given point and then + * delays sending its callback + * @param {number} lat The latitude to send + * @param {number} lng The longitude to send + * @return {function(function)} The function that sends the point + */ + function pointSender(lat, lng) { + /** + * Sends the point, then calls the callback after a delay + * @param {function} callback Called when complete + */ + return function(callback) { + console.log('Visiting point ' + lat/COORD_FACTOR + ', ' + + lng/COORD_FACTOR); + call.write({ + latitude: lat, + longitude: lng + }); + _.delay(callback, _.random(500, 1500)); + }; + } + var point_senders = []; + for (var i = 0; i < num_points; i++) { + var rand_point = feature_list[_.random(0, feature_list.length - 1)]; + point_senders[i] = pointSender(rand_point.location.latitude, + rand_point.location.longitude); + } + async.series(point_senders, function() { + call.end(); + }); + }); +} + +/** + * Run the routeChat demo. 
Send some chat messages, and print any chat messages + * that are sent from the server. + * @param {function} callback Called when the demo is complete + */ +function runRouteChat(callback) { + var call = client.routeChat(); + call.on('data', function(note) { + console.log('Got message "' + note.message + '" at ' + + note.location.latitude + ', ' + note.location.longitude); + }); + + call.on('end', callback); + + var notes = [{ + location: { + latitude: 0, + longitude: 0 + }, + message: 'First message' + }, { + location: { + latitude: 0, + longitude: 1 + }, + message: 'Second message' + }, { + location: { + latitude: 1, + longitude: 0 + }, + message: 'Third message' + }, { + location: { + latitude: 0, + longitude: 0 + }, + message: 'Fourth message' + }]; + for (var i = 0; i < notes.length; i++) { + var note = notes[i]; + console.log('Sending message "' + note.message + '" at ' + + note.location.latitude + ', ' + note.location.longitude); + call.write(note); + } + call.end(); +} + +/** + * Run all of the demos in order + */ +function main() { + async.series([ + runGetFeature, + runListFeatures, + runRecordRoute, + runRouteChat + ]); +} + +if (require.main === module) { + main(); +} + +exports.runGetFeature = runGetFeature; + +exports.runListFeatures = runListFeatures; + +exports.runRecordRoute = runRecordRoute; + +exports.runRouteChat = runRouteChat; diff --git a/examples/routeguide/dynamic_codegen/route_guide_db.json b/examples/routeguide/dynamic_codegen/route_guide_db.json new file mode 100644 index 000000000..9d6a980ab --- /dev/null +++ b/examples/routeguide/dynamic_codegen/route_guide_db.json @@ -0,0 +1,601 @@ +[{ + "location": { + "latitude": 407838351, + "longitude": -746143763 + }, + "name": "Patriots Path, Mendham, NJ 07945, USA" +}, { + "location": { + "latitude": 408122808, + "longitude": -743999179 + }, + "name": "101 New Jersey 10, Whippany, NJ 07981, USA" +}, { + "location": { + "latitude": 413628156, + "longitude": -749015468 + }, + "name": "U.S. 
6, Shohola, PA 18458, USA" +}, { + "location": { + "latitude": 419999544, + "longitude": -740371136 + }, + "name": "5 Conners Road, Kingston, NY 12401, USA" +}, { + "location": { + "latitude": 414008389, + "longitude": -743951297 + }, + "name": "Mid Hudson Psychiatric Center, New Hampton, NY 10958, USA" +}, { + "location": { + "latitude": 419611318, + "longitude": -746524769 + }, + "name": "287 Flugertown Road, Livingston Manor, NY 12758, USA" +}, { + "location": { + "latitude": 406109563, + "longitude": -742186778 + }, + "name": "4001 Tremley Point Road, Linden, NJ 07036, USA" +}, { + "location": { + "latitude": 416802456, + "longitude": -742370183 + }, + "name": "352 South Mountain Road, Wallkill, NY 12589, USA" +}, { + "location": { + "latitude": 412950425, + "longitude": -741077389 + }, + "name": "Bailey Turn Road, Harriman, NY 10926, USA" +}, { + "location": { + "latitude": 412144655, + "longitude": -743949739 + }, + "name": "193-199 Wawayanda Road, Hewitt, NJ 07421, USA" +}, { + "location": { + "latitude": 415736605, + "longitude": -742847522 + }, + "name": "406-496 Ward Avenue, Pine Bush, NY 12566, USA" +}, { + "location": { + "latitude": 413843930, + "longitude": -740501726 + }, + "name": "162 Merrill Road, Highland Mills, NY 10930, USA" +}, { + "location": { + "latitude": 410873075, + "longitude": -744459023 + }, + "name": "Clinton Road, West Milford, NJ 07480, USA" +}, { + "location": { + "latitude": 412346009, + "longitude": -744026814 + }, + "name": "16 Old Brook Lane, Warwick, NY 10990, USA" +}, { + "location": { + "latitude": 402948455, + "longitude": -747903913 + }, + "name": "3 Drake Lane, Pennington, NJ 08534, USA" +}, { + "location": { + "latitude": 406337092, + "longitude": -740122226 + }, + "name": "6324 8th Avenue, Brooklyn, NY 11220, USA" +}, { + "location": { + "latitude": 406421967, + "longitude": -747727624 + }, + "name": "1 Merck Access Road, Whitehouse Station, NJ 08889, USA" +}, { + "location": { + "latitude": 416318082, + "longitude": -749677716 + }, + "name": "78-98 Schalck Road, Narrowsburg, NY 12764, USA" +}, { + "location": { + "latitude": 415301720, + "longitude": -748416257 + }, + "name": "282 Lakeview Drive Road, Highland Lake, NY 12743, USA" +}, { + "location": { + "latitude": 402647019, + "longitude": -747071791 + }, + "name": "330 Evelyn Avenue, Hamilton Township, NJ 08619, USA" +}, { + "location": { + "latitude": 412567807, + "longitude": -741058078 + }, + "name": "New York State Reference Route 987E, Southfields, NY 10975, USA" +}, { + "location": { + "latitude": 416855156, + "longitude": -744420597 + }, + "name": "103-271 Tempaloni Road, Ellenville, NY 12428, USA" +}, { + "location": { + "latitude": 404663628, + "longitude": -744820157 + }, + "name": "1300 Airport Road, North Brunswick Township, NJ 08902, USA" +}, { + "location": { + "latitude": 407113723, + "longitude": -749746483 + }, + "name": "" +}, { + "location": { + "latitude": 402133926, + "longitude": -743613249 + }, + "name": "" +}, { + "location": { + "latitude": 400273442, + "longitude": -741220915 + }, + "name": "" +}, { + "location": { + "latitude": 411236786, + "longitude": -744070769 + }, + "name": "" +}, { + "location": { + "latitude": 411633782, + "longitude": -746784970 + }, + "name": "211-225 Plains Road, Augusta, NJ 07822, USA" +}, { + "location": { + "latitude": 415830701, + "longitude": -742952812 + }, + "name": "" +}, { + "location": { + "latitude": 413447164, + "longitude": -748712898 + }, + "name": "165 Pedersen Ridge Road, Milford, PA 18337, USA" +}, { + "location": { + 
"latitude": 405047245, + "longitude": -749800722 + }, + "name": "100-122 Locktown Road, Frenchtown, NJ 08825, USA" +}, { + "location": { + "latitude": 418858923, + "longitude": -746156790 + }, + "name": "" +}, { + "location": { + "latitude": 417951888, + "longitude": -748484944 + }, + "name": "650-652 Willi Hill Road, Swan Lake, NY 12783, USA" +}, { + "location": { + "latitude": 407033786, + "longitude": -743977337 + }, + "name": "26 East 3rd Street, New Providence, NJ 07974, USA" +}, { + "location": { + "latitude": 417548014, + "longitude": -740075041 + }, + "name": "" +}, { + "location": { + "latitude": 410395868, + "longitude": -744972325 + }, + "name": "" +}, { + "location": { + "latitude": 404615353, + "longitude": -745129803 + }, + "name": "" +}, { + "location": { + "latitude": 406589790, + "longitude": -743560121 + }, + "name": "611 Lawrence Avenue, Westfield, NJ 07090, USA" +}, { + "location": { + "latitude": 414653148, + "longitude": -740477477 + }, + "name": "18 Lannis Avenue, New Windsor, NY 12553, USA" +}, { + "location": { + "latitude": 405957808, + "longitude": -743255336 + }, + "name": "82-104 Amherst Avenue, Colonia, NJ 07067, USA" +}, { + "location": { + "latitude": 411733589, + "longitude": -741648093 + }, + "name": "170 Seven Lakes Drive, Sloatsburg, NY 10974, USA" +}, { + "location": { + "latitude": 412676291, + "longitude": -742606606 + }, + "name": "1270 Lakes Road, Monroe, NY 10950, USA" +}, { + "location": { + "latitude": 409224445, + "longitude": -748286738 + }, + "name": "509-535 Alphano Road, Great Meadows, NJ 07838, USA" +}, { + "location": { + "latitude": 406523420, + "longitude": -742135517 + }, + "name": "652 Garden Street, Elizabeth, NJ 07202, USA" +}, { + "location": { + "latitude": 401827388, + "longitude": -740294537 + }, + "name": "349 Sea Spray Court, Neptune City, NJ 07753, USA" +}, { + "location": { + "latitude": 410564152, + "longitude": -743685054 + }, + "name": "13-17 Stanley Street, West Milford, NJ 07480, USA" +}, { + "location": { + "latitude": 408472324, + "longitude": -740726046 + }, + "name": "47 Industrial Avenue, Teterboro, NJ 07608, USA" +}, { + "location": { + "latitude": 412452168, + "longitude": -740214052 + }, + "name": "5 White Oak Lane, Stony Point, NY 10980, USA" +}, { + "location": { + "latitude": 409146138, + "longitude": -746188906 + }, + "name": "Berkshire Valley Management Area Trail, Jefferson, NJ, USA" +}, { + "location": { + "latitude": 404701380, + "longitude": -744781745 + }, + "name": "1007 Jersey Avenue, New Brunswick, NJ 08901, USA" +}, { + "location": { + "latitude": 409642566, + "longitude": -746017679 + }, + "name": "6 East Emerald Isle Drive, Lake Hopatcong, NJ 07849, USA" +}, { + "location": { + "latitude": 408031728, + "longitude": -748645385 + }, + "name": "1358-1474 New Jersey 57, Port Murray, NJ 07865, USA" +}, { + "location": { + "latitude": 413700272, + "longitude": -742135189 + }, + "name": "367 Prospect Road, Chester, NY 10918, USA" +}, { + "location": { + "latitude": 404310607, + "longitude": -740282632 + }, + "name": "10 Simon Lake Drive, Atlantic Highlands, NJ 07716, USA" +}, { + "location": { + "latitude": 409319800, + "longitude": -746201391 + }, + "name": "11 Ward Street, Mount Arlington, NJ 07856, USA" +}, { + "location": { + "latitude": 406685311, + "longitude": -742108603 + }, + "name": "300-398 Jefferson Avenue, Elizabeth, NJ 07201, USA" +}, { + "location": { + "latitude": 419018117, + "longitude": -749142781 + }, + "name": "43 Dreher Road, Roscoe, NY 12776, USA" +}, { + "location": { + 
"latitude": 412856162, + "longitude": -745148837 + }, + "name": "Swan Street, Pine Island, NY 10969, USA" +}, { + "location": { + "latitude": 416560744, + "longitude": -746721964 + }, + "name": "66 Pleasantview Avenue, Monticello, NY 12701, USA" +}, { + "location": { + "latitude": 405314270, + "longitude": -749836354 + }, + "name": "" +}, { + "location": { + "latitude": 414219548, + "longitude": -743327440 + }, + "name": "" +}, { + "location": { + "latitude": 415534177, + "longitude": -742900616 + }, + "name": "565 Winding Hills Road, Montgomery, NY 12549, USA" +}, { + "location": { + "latitude": 406898530, + "longitude": -749127080 + }, + "name": "231 Rocky Run Road, Glen Gardner, NJ 08826, USA" +}, { + "location": { + "latitude": 407586880, + "longitude": -741670168 + }, + "name": "100 Mount Pleasant Avenue, Newark, NJ 07104, USA" +}, { + "location": { + "latitude": 400106455, + "longitude": -742870190 + }, + "name": "517-521 Huntington Drive, Manchester Township, NJ 08759, USA" +}, { + "location": { + "latitude": 400066188, + "longitude": -746793294 + }, + "name": "" +}, { + "location": { + "latitude": 418803880, + "longitude": -744102673 + }, + "name": "40 Mountain Road, Napanoch, NY 12458, USA" +}, { + "location": { + "latitude": 414204288, + "longitude": -747895140 + }, + "name": "" +}, { + "location": { + "latitude": 414777405, + "longitude": -740615601 + }, + "name": "" +}, { + "location": { + "latitude": 415464475, + "longitude": -747175374 + }, + "name": "48 North Road, Forestburgh, NY 12777, USA" +}, { + "location": { + "latitude": 404062378, + "longitude": -746376177 + }, + "name": "" +}, { + "location": { + "latitude": 405688272, + "longitude": -749285130 + }, + "name": "" +}, { + "location": { + "latitude": 400342070, + "longitude": -748788996 + }, + "name": "" +}, { + "location": { + "latitude": 401809022, + "longitude": -744157964 + }, + "name": "" +}, { + "location": { + "latitude": 404226644, + "longitude": -740517141 + }, + "name": "9 Thompson Avenue, Leonardo, NJ 07737, USA" +}, { + "location": { + "latitude": 410322033, + "longitude": -747871659 + }, + "name": "" +}, { + "location": { + "latitude": 407100674, + "longitude": -747742727 + }, + "name": "" +}, { + "location": { + "latitude": 418811433, + "longitude": -741718005 + }, + "name": "213 Bush Road, Stone Ridge, NY 12484, USA" +}, { + "location": { + "latitude": 415034302, + "longitude": -743850945 + }, + "name": "" +}, { + "location": { + "latitude": 411349992, + "longitude": -743694161 + }, + "name": "" +}, { + "location": { + "latitude": 404839914, + "longitude": -744759616 + }, + "name": "1-17 Bergen Court, New Brunswick, NJ 08901, USA" +}, { + "location": { + "latitude": 414638017, + "longitude": -745957854 + }, + "name": "35 Oakland Valley Road, Cuddebackville, NY 12729, USA" +}, { + "location": { + "latitude": 412127800, + "longitude": -740173578 + }, + "name": "" +}, { + "location": { + "latitude": 401263460, + "longitude": -747964303 + }, + "name": "" +}, { + "location": { + "latitude": 412843391, + "longitude": -749086026 + }, + "name": "" +}, { + "location": { + "latitude": 418512773, + "longitude": -743067823 + }, + "name": "" +}, { + "location": { + "latitude": 404318328, + "longitude": -740835638 + }, + "name": "42-102 Main Street, Belford, NJ 07718, USA" +}, { + "location": { + "latitude": 419020746, + "longitude": -741172328 + }, + "name": "" +}, { + "location": { + "latitude": 404080723, + "longitude": -746119569 + }, + "name": "" +}, { + "location": { + "latitude": 401012643, + "longitude": 
-744035134 + }, + "name": "" +}, { + "location": { + "latitude": 404306372, + "longitude": -741079661 + }, + "name": "" +}, { + "location": { + "latitude": 403966326, + "longitude": -748519297 + }, + "name": "" +}, { + "location": { + "latitude": 405002031, + "longitude": -748407866 + }, + "name": "" +}, { + "location": { + "latitude": 409532885, + "longitude": -742200683 + }, + "name": "" +}, { + "location": { + "latitude": 416851321, + "longitude": -742674555 + }, + "name": "" +}, { + "location": { + "latitude": 406411633, + "longitude": -741722051 + }, + "name": "3387 Richmond Terrace, Staten Island, NY 10303, USA" +}, { + "location": { + "latitude": 413069058, + "longitude": -744597778 + }, + "name": "261 Van Sickle Road, Goshen, NY 10924, USA" +}, { + "location": { + "latitude": 418465462, + "longitude": -746859398 + }, + "name": "" +}, { + "location": { + "latitude": 411733222, + "longitude": -744228360 + }, + "name": "" +}, { + "location": { + "latitude": 410248224, + "longitude": -747127767 + }, + "name": "3 Hasta Way, Newton, NJ 07860, USA" +}] diff --git a/examples/routeguide/dynamic_codegen/route_guide_server.js b/examples/routeguide/dynamic_codegen/route_guide_server.js new file mode 100644 index 000000000..a303b825b --- /dev/null +++ b/examples/routeguide/dynamic_codegen/route_guide_server.js @@ -0,0 +1,245 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +var PROTO_PATH = __dirname + '/../../protos/route_guide.proto'; + +var fs = require('fs'); +var parseArgs = require('minimist'); +var path = require('path'); +var _ = require('lodash'); +var grpc = require('@grpc/grpc-js'); +var protoLoader = require('@grpc/proto-loader'); +var packageDefinition = protoLoader.loadSync( + PROTO_PATH, + {keepCase: true, + longs: String, + enums: String, + defaults: true, + oneofs: true + }); +var routeguide = grpc.loadPackageDefinition(packageDefinition).routeguide; + +var COORD_FACTOR = 1e7; + +/** + * For simplicity, a point is a record type that looks like + * {latitude: number, longitude: number}, and a feature is a record type that + * looks like {name: string, location: point}. feature objects with name==='' + * are points with no feature. + */ + +/** + * List of feature objects at points that have been requested so far. + */ +var feature_list = []; + +/** + * Get a feature object at the given point, or creates one if it does not exist. + * @param {point} point The point to check + * @return {feature} The feature object at the point. 
Note that an empty name + * indicates no feature + */ +function checkFeature(point) { + var feature; + // Check if there is already a feature object for the given point + for (var i = 0; i < feature_list.length; i++) { + feature = feature_list[i]; + if (feature.location.latitude === point.latitude && + feature.location.longitude === point.longitude) { + return feature; + } + } + var name = ''; + feature = { + name: name, + location: point + }; + return feature; +} + +/** + * getFeature request handler. Gets a request with a point, and responds with a + * feature object indicating whether there is a feature at that point. + * @param {EventEmitter} call Call object for the handler to process + * @param {function(Error, feature)} callback Response callback + */ +function getFeature(call, callback) { + callback(null, checkFeature(call.request)); +} + +/** + * listFeatures request handler. Gets a request with two points, and responds + * with a stream of all features in the bounding box defined by those points. + * @param {Writable} call Writable stream for responses with an additional + * request property for the request value. + */ +function listFeatures(call) { + var lo = call.request.lo; + var hi = call.request.hi; + var left = _.min([lo.longitude, hi.longitude]); + var right = _.max([lo.longitude, hi.longitude]); + var top = _.max([lo.latitude, hi.latitude]); + var bottom = _.min([lo.latitude, hi.latitude]); + // For each feature, check if it is in the given bounding box + _.each(feature_list, function(feature) { + if (feature.name === '') { + return; + } + if (feature.location.longitude >= left && + feature.location.longitude <= right && + feature.location.latitude >= bottom && + feature.location.latitude <= top) { + call.write(feature); + } + }); + call.end(); +} + +/** + * Calculate the distance between two points using the "haversine" formula. + * The formula is based on http://mathforum.org/library/drmath/view/51879.html. + * @param start The starting point + * @param end The end point + * @return The distance between the points in meters + */ +function getDistance(start, end) { + function toRadians(num) { + return num * Math.PI / 180; + } + var R = 6371000; // earth radius in metres + var lat1 = toRadians(start.latitude / COORD_FACTOR); + var lat2 = toRadians(end.latitude / COORD_FACTOR); + var lon1 = toRadians(start.longitude / COORD_FACTOR); + var lon2 = toRadians(end.longitude / COORD_FACTOR); + + var deltalat = lat2-lat1; + var deltalon = lon2-lon1; + var a = Math.sin(deltalat/2) * Math.sin(deltalat/2) + + Math.cos(lat1) * Math.cos(lat2) * + Math.sin(deltalon/2) * Math.sin(deltalon/2); + var c = 2 * Math.atan2(Math.sqrt(a), Math.sqrt(1-a)); + return R * c; +} + +/** + * recordRoute handler. Gets a stream of points, and responds with statistics + * about the "trip": number of points, number of known features visited, total + * distance traveled, and total time spent. + * @param {Readable} call The request point stream. 
+ * @param {function(Error, routeSummary)} callback The callback to pass the + * response to + */ +function recordRoute(call, callback) { + var point_count = 0; + var feature_count = 0; + var distance = 0; + var previous = null; + // Start a timer + var start_time = process.hrtime(); + call.on('data', function(point) { + point_count += 1; + if (checkFeature(point).name !== '') { + feature_count += 1; + } + /* For each point after the first, add the incremental distance from the + * previous point to the total distance value */ + if (previous != null) { + distance += getDistance(previous, point); + } + previous = point; + }); + call.on('end', function() { + callback(null, { + point_count: point_count, + feature_count: feature_count, + // Cast the distance to an integer + distance: distance|0, + // End the timer + elapsed_time: process.hrtime(start_time)[0] + }); + }); +} + +var route_notes = {}; + +/** + * Turn the point into a dictionary key. + * @param {point} point The point to use + * @return {string} The key for an object + */ +function pointKey(point) { + return point.latitude + ' ' + point.longitude; +} + +/** + * routeChat handler. Receives a stream of message/location pairs, and responds + * with a stream of all previous messages at each of those locations. + * @param {Duplex} call The stream for incoming and outgoing messages + */ +function routeChat(call) { + call.on('data', function(note) { + var key = pointKey(note.location); + /* For each note sent, respond with all previous notes that correspond to + * the same point */ + if (route_notes.hasOwnProperty(key)) { + _.each(route_notes[key], function(note) { + call.write(note); + }); + } else { + route_notes[key] = []; + } + // Then add the new note to the list + route_notes[key].push(JSON.parse(JSON.stringify(note))); + }); + call.on('end', function() { + call.end(); + }); +} + +/** + * Get a new server with the handler functions in this file bound to the methods + * it serves. + * @return {Server} The new server object + */ +function getServer() { + var server = new grpc.Server(); + server.addService(routeguide.RouteGuide.service, { + getFeature: getFeature, + listFeatures: listFeatures, + recordRoute: recordRoute, + routeChat: routeChat + }); + return server; +} + +if (require.main === module) { + // If this is run as a script, start a server on an unused port + var routeServer = getServer(); + routeServer.bindAsync('0.0.0.0:50051', grpc.ServerCredentials.createInsecure(), () => { + var argv = parseArgs(process.argv, { + string: 'db_path' + }); + fs.readFile(path.resolve(argv.db_path), function(err, data) { + if (err) throw err; + feature_list = JSON.parse(data); + routeServer.start(); + }); + }); +} + +exports.getServer = getServer; diff --git a/examples/routeguide/static_codegen/README.md b/examples/routeguide/static_codegen/README.md new file mode 100644 index 000000000..f154f7e46 --- /dev/null +++ b/examples/routeguide/static_codegen/README.md @@ -0,0 +1,7 @@ +This is the static code generation variant of the Route Guide example. Code in these examples is pre-generated using protoc and the Node gRPC protoc plugin, and the generated code can be found in various `*_pb.js` files. 
The command line sequence for generating those files is as follows, starting in the directory which contains this README.md file (the `grpc-tools` package installed below provides `protoc` and the Node gRPC plugin via `grpc_tools_node_protoc`):
+
+```sh
+cd ../../protos
+npm install -g grpc-tools
+grpc_tools_node_protoc --js_out=import_style=commonjs,binary:../routeguide/static_codegen/ --grpc_out=grpc_js:../routeguide/static_codegen/ route_guide.proto
+```
diff --git a/examples/routeguide/static_codegen/route_guide_client.js b/examples/routeguide/static_codegen/route_guide_client.js
new file mode 100644
index 000000000..0ab40a8a5
--- /dev/null
+++ b/examples/routeguide/static_codegen/route_guide_client.js
@@ -0,0 +1,237 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+var messages = require('./route_guide_pb');
+var services = require('./route_guide_grpc_pb');
+
+var async = require('async');
+var fs = require('fs');
+var parseArgs = require('minimist');
+var path = require('path');
+var _ = require('lodash');
+var grpc = require('@grpc/grpc-js');
+
+var client = new services.RouteGuideClient('localhost:50051',
+                                           grpc.credentials.createInsecure());
+
+var COORD_FACTOR = 1e7;
+
+/**
+ * Run the getFeature demo. Calls getFeature with a point known to have a
+ * feature and a point known not to have a feature.
+ * @param {function} callback Called when this demo is complete
+ */
+function runGetFeature(callback) {
+  var next = _.after(2, callback);
+  function featureCallback(error, feature) {
+    if (error) {
+      callback(error);
+      return;
+    }
+    var latitude = feature.getLocation().getLatitude();
+    var longitude = feature.getLocation().getLongitude();
+    if (feature.getName() === '') {
+      console.log('Found no feature at ' +
+          latitude/COORD_FACTOR + ', ' + longitude/COORD_FACTOR);
+    } else {
+      console.log('Found feature called "' + feature.getName() + '" at ' +
+          latitude/COORD_FACTOR + ', ' + longitude/COORD_FACTOR);
+    }
+    next();
+  }
+  var point1 = new messages.Point();
+  point1.setLatitude(409146138);
+  point1.setLongitude(-746188906);
+  var point2 = new messages.Point();
+  point2.setLatitude(0);
+  point2.setLongitude(0);
+  client.getFeature(point1, featureCallback);
+  client.getFeature(point2, featureCallback);
+}
+
+/**
+ * Run the listFeatures demo. Calls listFeatures with a rectangle containing all
+ * of the features in the pre-generated database. Prints each response as it
+ * comes in.
+ * @param {function} callback Called when this demo is complete + */ +function runListFeatures(callback) { + var rect = new messages.Rectangle(); + var lo = new messages.Point(); + lo.setLatitude(400000000); + lo.setLongitude(-750000000); + rect.setLo(lo); + var hi = new messages.Point(); + hi.setLatitude(420000000); + hi.setLongitude(-730000000); + rect.setHi(hi); + console.log('Looking for features between 40, -75 and 42, -73'); + var call = client.listFeatures(rect); + call.on('data', function(feature) { + console.log('Found feature called "' + feature.getName() + '" at ' + + feature.getLocation().getLatitude()/COORD_FACTOR + ', ' + + feature.getLocation().getLongitude()/COORD_FACTOR); + }); + call.on('end', callback); +} + +/** + * Run the recordRoute demo. Sends several randomly chosen points from the + * pre-generated feature database with a variable delay in between. Prints the + * statistics when they are sent from the server. + * @param {function} callback Called when this demo is complete + */ +function runRecordRoute(callback) { + var argv = parseArgs(process.argv, { + string: 'db_path' + }); + fs.readFile(path.resolve(argv.db_path), function(err, data) { + if (err) { + callback(err); + return; + } + // Transform the loaded features to Feature objects + var feature_list = _.map(JSON.parse(data), function(value) { + var feature = new messages.Feature(); + feature.setName(value.name); + var location = new messages.Point(); + location.setLatitude(value.location.latitude); + location.setLongitude(value.location.longitude); + feature.setLocation(location); + return feature; + }); + + var num_points = 10; + var call = client.recordRoute(function(error, stats) { + if (error) { + callback(error); + return; + } + console.log('Finished trip with', stats.getPointCount(), 'points'); + console.log('Passed', stats.getFeatureCount(), 'features'); + console.log('Travelled', stats.getDistance(), 'meters'); + console.log('It took', stats.getElapsedTime(), 'seconds'); + callback(); + }); + /** + * Constructs a function that asynchronously sends the given point and then + * delays sending its callback + * @param {messages.Point} location The point to send + * @return {function(function)} The function that sends the point + */ + function pointSender(location) { + /** + * Sends the point, then calls the callback after a delay + * @param {function} callback Called when complete + */ + return function(callback) { + console.log('Visiting point ' + location.getLatitude()/COORD_FACTOR + + ', ' + location.getLongitude()/COORD_FACTOR); + call.write(location); + _.delay(callback, _.random(500, 1500)); + }; + } + var point_senders = []; + for (var i = 0; i < num_points; i++) { + var rand_point = feature_list[_.random(0, feature_list.length - 1)]; + point_senders[i] = pointSender(rand_point.getLocation()); + } + async.series(point_senders, function() { + call.end(); + }); + }); +} + +/** + * Run the routeChat demo. Send some chat messages, and print any chat messages + * that are sent from the server. 
+ * @param {function} callback Called when the demo is complete + */ +function runRouteChat(callback) { + var call = client.routeChat(); + call.on('data', function(note) { + console.log('Got message "' + note.getMessage() + '" at ' + + note.getLocation().getLatitude() + ', ' + + note.getLocation().getLongitude()); + }); + + call.on('end', callback); + + var notes = [{ + location: { + latitude: 0, + longitude: 0 + }, + message: 'First message' + }, { + location: { + latitude: 0, + longitude: 1 + }, + message: 'Second message' + }, { + location: { + latitude: 1, + longitude: 0 + }, + message: 'Third message' + }, { + location: { + latitude: 0, + longitude: 0 + }, + message: 'Fourth message' + }]; + for (var i = 0; i < notes.length; i++) { + var note = notes[i]; + console.log('Sending message "' + note.message + '" at ' + + note.location.latitude + ', ' + note.location.longitude); + var noteMsg = new messages.RouteNote(); + noteMsg.setMessage(note.message); + var location = new messages.Point(); + location.setLatitude(note.location.latitude); + location.setLongitude(note.location.longitude); + noteMsg.setLocation(location); + call.write(noteMsg); + } + call.end(); +} + +/** + * Run all of the demos in order + */ +function main() { + async.series([ + runGetFeature, + runListFeatures, + runRecordRoute, + runRouteChat + ]); +} + +if (require.main === module) { + main(); +} + +exports.runGetFeature = runGetFeature; + +exports.runListFeatures = runListFeatures; + +exports.runRecordRoute = runRecordRoute; + +exports.runRouteChat = runRouteChat; diff --git a/examples/routeguide/static_codegen/route_guide_db.json b/examples/routeguide/static_codegen/route_guide_db.json new file mode 100644 index 000000000..9d6a980ab --- /dev/null +++ b/examples/routeguide/static_codegen/route_guide_db.json @@ -0,0 +1,601 @@ +[{ + "location": { + "latitude": 407838351, + "longitude": -746143763 + }, + "name": "Patriots Path, Mendham, NJ 07945, USA" +}, { + "location": { + "latitude": 408122808, + "longitude": -743999179 + }, + "name": "101 New Jersey 10, Whippany, NJ 07981, USA" +}, { + "location": { + "latitude": 413628156, + "longitude": -749015468 + }, + "name": "U.S. 
6, Shohola, PA 18458, USA" +}, { + "location": { + "latitude": 419999544, + "longitude": -740371136 + }, + "name": "5 Conners Road, Kingston, NY 12401, USA" +}, { + "location": { + "latitude": 414008389, + "longitude": -743951297 + }, + "name": "Mid Hudson Psychiatric Center, New Hampton, NY 10958, USA" +}, { + "location": { + "latitude": 419611318, + "longitude": -746524769 + }, + "name": "287 Flugertown Road, Livingston Manor, NY 12758, USA" +}, { + "location": { + "latitude": 406109563, + "longitude": -742186778 + }, + "name": "4001 Tremley Point Road, Linden, NJ 07036, USA" +}, { + "location": { + "latitude": 416802456, + "longitude": -742370183 + }, + "name": "352 South Mountain Road, Wallkill, NY 12589, USA" +}, { + "location": { + "latitude": 412950425, + "longitude": -741077389 + }, + "name": "Bailey Turn Road, Harriman, NY 10926, USA" +}, { + "location": { + "latitude": 412144655, + "longitude": -743949739 + }, + "name": "193-199 Wawayanda Road, Hewitt, NJ 07421, USA" +}, { + "location": { + "latitude": 415736605, + "longitude": -742847522 + }, + "name": "406-496 Ward Avenue, Pine Bush, NY 12566, USA" +}, { + "location": { + "latitude": 413843930, + "longitude": -740501726 + }, + "name": "162 Merrill Road, Highland Mills, NY 10930, USA" +}, { + "location": { + "latitude": 410873075, + "longitude": -744459023 + }, + "name": "Clinton Road, West Milford, NJ 07480, USA" +}, { + "location": { + "latitude": 412346009, + "longitude": -744026814 + }, + "name": "16 Old Brook Lane, Warwick, NY 10990, USA" +}, { + "location": { + "latitude": 402948455, + "longitude": -747903913 + }, + "name": "3 Drake Lane, Pennington, NJ 08534, USA" +}, { + "location": { + "latitude": 406337092, + "longitude": -740122226 + }, + "name": "6324 8th Avenue, Brooklyn, NY 11220, USA" +}, { + "location": { + "latitude": 406421967, + "longitude": -747727624 + }, + "name": "1 Merck Access Road, Whitehouse Station, NJ 08889, USA" +}, { + "location": { + "latitude": 416318082, + "longitude": -749677716 + }, + "name": "78-98 Schalck Road, Narrowsburg, NY 12764, USA" +}, { + "location": { + "latitude": 415301720, + "longitude": -748416257 + }, + "name": "282 Lakeview Drive Road, Highland Lake, NY 12743, USA" +}, { + "location": { + "latitude": 402647019, + "longitude": -747071791 + }, + "name": "330 Evelyn Avenue, Hamilton Township, NJ 08619, USA" +}, { + "location": { + "latitude": 412567807, + "longitude": -741058078 + }, + "name": "New York State Reference Route 987E, Southfields, NY 10975, USA" +}, { + "location": { + "latitude": 416855156, + "longitude": -744420597 + }, + "name": "103-271 Tempaloni Road, Ellenville, NY 12428, USA" +}, { + "location": { + "latitude": 404663628, + "longitude": -744820157 + }, + "name": "1300 Airport Road, North Brunswick Township, NJ 08902, USA" +}, { + "location": { + "latitude": 407113723, + "longitude": -749746483 + }, + "name": "" +}, { + "location": { + "latitude": 402133926, + "longitude": -743613249 + }, + "name": "" +}, { + "location": { + "latitude": 400273442, + "longitude": -741220915 + }, + "name": "" +}, { + "location": { + "latitude": 411236786, + "longitude": -744070769 + }, + "name": "" +}, { + "location": { + "latitude": 411633782, + "longitude": -746784970 + }, + "name": "211-225 Plains Road, Augusta, NJ 07822, USA" +}, { + "location": { + "latitude": 415830701, + "longitude": -742952812 + }, + "name": "" +}, { + "location": { + "latitude": 413447164, + "longitude": -748712898 + }, + "name": "165 Pedersen Ridge Road, Milford, PA 18337, USA" +}, { + "location": { + 
"latitude": 405047245, + "longitude": -749800722 + }, + "name": "100-122 Locktown Road, Frenchtown, NJ 08825, USA" +}, { + "location": { + "latitude": 418858923, + "longitude": -746156790 + }, + "name": "" +}, { + "location": { + "latitude": 417951888, + "longitude": -748484944 + }, + "name": "650-652 Willi Hill Road, Swan Lake, NY 12783, USA" +}, { + "location": { + "latitude": 407033786, + "longitude": -743977337 + }, + "name": "26 East 3rd Street, New Providence, NJ 07974, USA" +}, { + "location": { + "latitude": 417548014, + "longitude": -740075041 + }, + "name": "" +}, { + "location": { + "latitude": 410395868, + "longitude": -744972325 + }, + "name": "" +}, { + "location": { + "latitude": 404615353, + "longitude": -745129803 + }, + "name": "" +}, { + "location": { + "latitude": 406589790, + "longitude": -743560121 + }, + "name": "611 Lawrence Avenue, Westfield, NJ 07090, USA" +}, { + "location": { + "latitude": 414653148, + "longitude": -740477477 + }, + "name": "18 Lannis Avenue, New Windsor, NY 12553, USA" +}, { + "location": { + "latitude": 405957808, + "longitude": -743255336 + }, + "name": "82-104 Amherst Avenue, Colonia, NJ 07067, USA" +}, { + "location": { + "latitude": 411733589, + "longitude": -741648093 + }, + "name": "170 Seven Lakes Drive, Sloatsburg, NY 10974, USA" +}, { + "location": { + "latitude": 412676291, + "longitude": -742606606 + }, + "name": "1270 Lakes Road, Monroe, NY 10950, USA" +}, { + "location": { + "latitude": 409224445, + "longitude": -748286738 + }, + "name": "509-535 Alphano Road, Great Meadows, NJ 07838, USA" +}, { + "location": { + "latitude": 406523420, + "longitude": -742135517 + }, + "name": "652 Garden Street, Elizabeth, NJ 07202, USA" +}, { + "location": { + "latitude": 401827388, + "longitude": -740294537 + }, + "name": "349 Sea Spray Court, Neptune City, NJ 07753, USA" +}, { + "location": { + "latitude": 410564152, + "longitude": -743685054 + }, + "name": "13-17 Stanley Street, West Milford, NJ 07480, USA" +}, { + "location": { + "latitude": 408472324, + "longitude": -740726046 + }, + "name": "47 Industrial Avenue, Teterboro, NJ 07608, USA" +}, { + "location": { + "latitude": 412452168, + "longitude": -740214052 + }, + "name": "5 White Oak Lane, Stony Point, NY 10980, USA" +}, { + "location": { + "latitude": 409146138, + "longitude": -746188906 + }, + "name": "Berkshire Valley Management Area Trail, Jefferson, NJ, USA" +}, { + "location": { + "latitude": 404701380, + "longitude": -744781745 + }, + "name": "1007 Jersey Avenue, New Brunswick, NJ 08901, USA" +}, { + "location": { + "latitude": 409642566, + "longitude": -746017679 + }, + "name": "6 East Emerald Isle Drive, Lake Hopatcong, NJ 07849, USA" +}, { + "location": { + "latitude": 408031728, + "longitude": -748645385 + }, + "name": "1358-1474 New Jersey 57, Port Murray, NJ 07865, USA" +}, { + "location": { + "latitude": 413700272, + "longitude": -742135189 + }, + "name": "367 Prospect Road, Chester, NY 10918, USA" +}, { + "location": { + "latitude": 404310607, + "longitude": -740282632 + }, + "name": "10 Simon Lake Drive, Atlantic Highlands, NJ 07716, USA" +}, { + "location": { + "latitude": 409319800, + "longitude": -746201391 + }, + "name": "11 Ward Street, Mount Arlington, NJ 07856, USA" +}, { + "location": { + "latitude": 406685311, + "longitude": -742108603 + }, + "name": "300-398 Jefferson Avenue, Elizabeth, NJ 07201, USA" +}, { + "location": { + "latitude": 419018117, + "longitude": -749142781 + }, + "name": "43 Dreher Road, Roscoe, NY 12776, USA" +}, { + "location": { + 
"latitude": 412856162, + "longitude": -745148837 + }, + "name": "Swan Street, Pine Island, NY 10969, USA" +}, { + "location": { + "latitude": 416560744, + "longitude": -746721964 + }, + "name": "66 Pleasantview Avenue, Monticello, NY 12701, USA" +}, { + "location": { + "latitude": 405314270, + "longitude": -749836354 + }, + "name": "" +}, { + "location": { + "latitude": 414219548, + "longitude": -743327440 + }, + "name": "" +}, { + "location": { + "latitude": 415534177, + "longitude": -742900616 + }, + "name": "565 Winding Hills Road, Montgomery, NY 12549, USA" +}, { + "location": { + "latitude": 406898530, + "longitude": -749127080 + }, + "name": "231 Rocky Run Road, Glen Gardner, NJ 08826, USA" +}, { + "location": { + "latitude": 407586880, + "longitude": -741670168 + }, + "name": "100 Mount Pleasant Avenue, Newark, NJ 07104, USA" +}, { + "location": { + "latitude": 400106455, + "longitude": -742870190 + }, + "name": "517-521 Huntington Drive, Manchester Township, NJ 08759, USA" +}, { + "location": { + "latitude": 400066188, + "longitude": -746793294 + }, + "name": "" +}, { + "location": { + "latitude": 418803880, + "longitude": -744102673 + }, + "name": "40 Mountain Road, Napanoch, NY 12458, USA" +}, { + "location": { + "latitude": 414204288, + "longitude": -747895140 + }, + "name": "" +}, { + "location": { + "latitude": 414777405, + "longitude": -740615601 + }, + "name": "" +}, { + "location": { + "latitude": 415464475, + "longitude": -747175374 + }, + "name": "48 North Road, Forestburgh, NY 12777, USA" +}, { + "location": { + "latitude": 404062378, + "longitude": -746376177 + }, + "name": "" +}, { + "location": { + "latitude": 405688272, + "longitude": -749285130 + }, + "name": "" +}, { + "location": { + "latitude": 400342070, + "longitude": -748788996 + }, + "name": "" +}, { + "location": { + "latitude": 401809022, + "longitude": -744157964 + }, + "name": "" +}, { + "location": { + "latitude": 404226644, + "longitude": -740517141 + }, + "name": "9 Thompson Avenue, Leonardo, NJ 07737, USA" +}, { + "location": { + "latitude": 410322033, + "longitude": -747871659 + }, + "name": "" +}, { + "location": { + "latitude": 407100674, + "longitude": -747742727 + }, + "name": "" +}, { + "location": { + "latitude": 418811433, + "longitude": -741718005 + }, + "name": "213 Bush Road, Stone Ridge, NY 12484, USA" +}, { + "location": { + "latitude": 415034302, + "longitude": -743850945 + }, + "name": "" +}, { + "location": { + "latitude": 411349992, + "longitude": -743694161 + }, + "name": "" +}, { + "location": { + "latitude": 404839914, + "longitude": -744759616 + }, + "name": "1-17 Bergen Court, New Brunswick, NJ 08901, USA" +}, { + "location": { + "latitude": 414638017, + "longitude": -745957854 + }, + "name": "35 Oakland Valley Road, Cuddebackville, NY 12729, USA" +}, { + "location": { + "latitude": 412127800, + "longitude": -740173578 + }, + "name": "" +}, { + "location": { + "latitude": 401263460, + "longitude": -747964303 + }, + "name": "" +}, { + "location": { + "latitude": 412843391, + "longitude": -749086026 + }, + "name": "" +}, { + "location": { + "latitude": 418512773, + "longitude": -743067823 + }, + "name": "" +}, { + "location": { + "latitude": 404318328, + "longitude": -740835638 + }, + "name": "42-102 Main Street, Belford, NJ 07718, USA" +}, { + "location": { + "latitude": 419020746, + "longitude": -741172328 + }, + "name": "" +}, { + "location": { + "latitude": 404080723, + "longitude": -746119569 + }, + "name": "" +}, { + "location": { + "latitude": 401012643, + "longitude": 
-744035134 + }, + "name": "" +}, { + "location": { + "latitude": 404306372, + "longitude": -741079661 + }, + "name": "" +}, { + "location": { + "latitude": 403966326, + "longitude": -748519297 + }, + "name": "" +}, { + "location": { + "latitude": 405002031, + "longitude": -748407866 + }, + "name": "" +}, { + "location": { + "latitude": 409532885, + "longitude": -742200683 + }, + "name": "" +}, { + "location": { + "latitude": 416851321, + "longitude": -742674555 + }, + "name": "" +}, { + "location": { + "latitude": 406411633, + "longitude": -741722051 + }, + "name": "3387 Richmond Terrace, Staten Island, NY 10303, USA" +}, { + "location": { + "latitude": 413069058, + "longitude": -744597778 + }, + "name": "261 Van Sickle Road, Goshen, NY 10924, USA" +}, { + "location": { + "latitude": 418465462, + "longitude": -746859398 + }, + "name": "" +}, { + "location": { + "latitude": 411733222, + "longitude": -744228360 + }, + "name": "" +}, { + "location": { + "latitude": 410248224, + "longitude": -747127767 + }, + "name": "3 Hasta Way, Newton, NJ 07860, USA" +}] diff --git a/examples/routeguide/static_codegen/route_guide_grpc_pb.js b/examples/routeguide/static_codegen/route_guide_grpc_pb.js new file mode 100644 index 000000000..83c839dc8 --- /dev/null +++ b/examples/routeguide/static_codegen/route_guide_grpc_pb.js @@ -0,0 +1,146 @@ +// GENERATED CODE -- DO NOT EDIT! + +// Original file comments: +// Copyright 2015 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +'use strict'; +var grpc = require('@grpc/grpc-js'); +var route_guide_pb = require('./route_guide_pb.js'); + +function serialize_routeguide_Feature(arg) { + if (!(arg instanceof route_guide_pb.Feature)) { + throw new Error('Expected argument of type routeguide.Feature'); + } + return Buffer.from(arg.serializeBinary()); +} + +function deserialize_routeguide_Feature(buffer_arg) { + return route_guide_pb.Feature.deserializeBinary(new Uint8Array(buffer_arg)); +} + +function serialize_routeguide_Point(arg) { + if (!(arg instanceof route_guide_pb.Point)) { + throw new Error('Expected argument of type routeguide.Point'); + } + return Buffer.from(arg.serializeBinary()); +} + +function deserialize_routeguide_Point(buffer_arg) { + return route_guide_pb.Point.deserializeBinary(new Uint8Array(buffer_arg)); +} + +function serialize_routeguide_Rectangle(arg) { + if (!(arg instanceof route_guide_pb.Rectangle)) { + throw new Error('Expected argument of type routeguide.Rectangle'); + } + return Buffer.from(arg.serializeBinary()); +} + +function deserialize_routeguide_Rectangle(buffer_arg) { + return route_guide_pb.Rectangle.deserializeBinary(new Uint8Array(buffer_arg)); +} + +function serialize_routeguide_RouteNote(arg) { + if (!(arg instanceof route_guide_pb.RouteNote)) { + throw new Error('Expected argument of type routeguide.RouteNote'); + } + return Buffer.from(arg.serializeBinary()); +} + +function deserialize_routeguide_RouteNote(buffer_arg) { + return route_guide_pb.RouteNote.deserializeBinary(new Uint8Array(buffer_arg)); +} + +function serialize_routeguide_RouteSummary(arg) { + if (!(arg instanceof route_guide_pb.RouteSummary)) { + throw new Error('Expected argument of type routeguide.RouteSummary'); + } + return Buffer.from(arg.serializeBinary()); +} + +function deserialize_routeguide_RouteSummary(buffer_arg) { + return route_guide_pb.RouteSummary.deserializeBinary(new Uint8Array(buffer_arg)); +} + + +// Interface exported by the server. +var RouteGuideService = exports.RouteGuideService = { + // A simple RPC. +// +// Obtains the feature at a given position. +// +// A feature with an empty name is returned if there's no feature at the given +// position. +getFeature: { + path: '/routeguide.RouteGuide/GetFeature', + requestStream: false, + responseStream: false, + requestType: route_guide_pb.Point, + responseType: route_guide_pb.Feature, + requestSerialize: serialize_routeguide_Point, + requestDeserialize: deserialize_routeguide_Point, + responseSerialize: serialize_routeguide_Feature, + responseDeserialize: deserialize_routeguide_Feature, + }, + // A server-to-client streaming RPC. +// +// Obtains the Features available within the given Rectangle. Results are +// streamed rather than returned at once (e.g. in a response message with a +// repeated field), as the rectangle may cover a large area and contain a +// huge number of features. +listFeatures: { + path: '/routeguide.RouteGuide/ListFeatures', + requestStream: false, + responseStream: true, + requestType: route_guide_pb.Rectangle, + responseType: route_guide_pb.Feature, + requestSerialize: serialize_routeguide_Rectangle, + requestDeserialize: deserialize_routeguide_Rectangle, + responseSerialize: serialize_routeguide_Feature, + responseDeserialize: deserialize_routeguide_Feature, + }, + // A client-to-server streaming RPC. +// +// Accepts a stream of Points on a route being traversed, returning a +// RouteSummary when traversal is completed. 
+recordRoute: { + path: '/routeguide.RouteGuide/RecordRoute', + requestStream: true, + responseStream: false, + requestType: route_guide_pb.Point, + responseType: route_guide_pb.RouteSummary, + requestSerialize: serialize_routeguide_Point, + requestDeserialize: deserialize_routeguide_Point, + responseSerialize: serialize_routeguide_RouteSummary, + responseDeserialize: deserialize_routeguide_RouteSummary, + }, + // A Bidirectional streaming RPC. +// +// Accepts a stream of RouteNotes sent while a route is being traversed, +// while receiving other RouteNotes (e.g. from other users). +routeChat: { + path: '/routeguide.RouteGuide/RouteChat', + requestStream: true, + responseStream: true, + requestType: route_guide_pb.RouteNote, + responseType: route_guide_pb.RouteNote, + requestSerialize: serialize_routeguide_RouteNote, + requestDeserialize: deserialize_routeguide_RouteNote, + responseSerialize: serialize_routeguide_RouteNote, + responseDeserialize: deserialize_routeguide_RouteNote, + }, +}; + +exports.RouteGuideClient = grpc.makeGenericClientConstructor(RouteGuideService); diff --git a/examples/routeguide/static_codegen/route_guide_pb.js b/examples/routeguide/static_codegen/route_guide_pb.js new file mode 100644 index 000000000..a032bec4c --- /dev/null +++ b/examples/routeguide/static_codegen/route_guide_pb.js @@ -0,0 +1,1069 @@ +// source: route_guide.proto +/** + * @fileoverview + * @enhanceable + * @suppress {messageConventions} JS Compiler reports an error if a variable or + * field starts with 'MSG_' and isn't a translatable message. + * @public + */ +// GENERATED CODE -- DO NOT EDIT! + +var jspb = require('google-protobuf'); +var goog = jspb; +var global = Function('return this')(); + +goog.exportSymbol('proto.routeguide.Feature', null, global); +goog.exportSymbol('proto.routeguide.Point', null, global); +goog.exportSymbol('proto.routeguide.Rectangle', null, global); +goog.exportSymbol('proto.routeguide.RouteNote', null, global); +goog.exportSymbol('proto.routeguide.RouteSummary', null, global); +/** + * Generated by JsPbCodeGenerator. + * @param {Array=} opt_data Optional initial data array, typically from a + * server response, or constructed directly in Javascript. The array is used + * in place and becomes part of the constructed object. It is not cloned. + * If no data is provided, the constructed object will be empty, but still + * valid. + * @extends {jspb.Message} + * @constructor + */ +proto.routeguide.Point = function(opt_data) { + jspb.Message.initialize(this, opt_data, 0, -1, null, null); +}; +goog.inherits(proto.routeguide.Point, jspb.Message); +if (goog.DEBUG && !COMPILED) { + /** + * @public + * @override + */ + proto.routeguide.Point.displayName = 'proto.routeguide.Point'; +} +/** + * Generated by JsPbCodeGenerator. + * @param {Array=} opt_data Optional initial data array, typically from a + * server response, or constructed directly in Javascript. The array is used + * in place and becomes part of the constructed object. It is not cloned. + * If no data is provided, the constructed object will be empty, but still + * valid. + * @extends {jspb.Message} + * @constructor + */ +proto.routeguide.Rectangle = function(opt_data) { + jspb.Message.initialize(this, opt_data, 0, -1, null, null); +}; +goog.inherits(proto.routeguide.Rectangle, jspb.Message); +if (goog.DEBUG && !COMPILED) { + /** + * @public + * @override + */ + proto.routeguide.Rectangle.displayName = 'proto.routeguide.Rectangle'; +} +/** + * Generated by JsPbCodeGenerator. 
+ * @param {Array=} opt_data Optional initial data array, typically from a + * server response, or constructed directly in Javascript. The array is used + * in place and becomes part of the constructed object. It is not cloned. + * If no data is provided, the constructed object will be empty, but still + * valid. + * @extends {jspb.Message} + * @constructor + */ +proto.routeguide.Feature = function(opt_data) { + jspb.Message.initialize(this, opt_data, 0, -1, null, null); +}; +goog.inherits(proto.routeguide.Feature, jspb.Message); +if (goog.DEBUG && !COMPILED) { + /** + * @public + * @override + */ + proto.routeguide.Feature.displayName = 'proto.routeguide.Feature'; +} +/** + * Generated by JsPbCodeGenerator. + * @param {Array=} opt_data Optional initial data array, typically from a + * server response, or constructed directly in Javascript. The array is used + * in place and becomes part of the constructed object. It is not cloned. + * If no data is provided, the constructed object will be empty, but still + * valid. + * @extends {jspb.Message} + * @constructor + */ +proto.routeguide.RouteNote = function(opt_data) { + jspb.Message.initialize(this, opt_data, 0, -1, null, null); +}; +goog.inherits(proto.routeguide.RouteNote, jspb.Message); +if (goog.DEBUG && !COMPILED) { + /** + * @public + * @override + */ + proto.routeguide.RouteNote.displayName = 'proto.routeguide.RouteNote'; +} +/** + * Generated by JsPbCodeGenerator. + * @param {Array=} opt_data Optional initial data array, typically from a + * server response, or constructed directly in Javascript. The array is used + * in place and becomes part of the constructed object. It is not cloned. + * If no data is provided, the constructed object will be empty, but still + * valid. + * @extends {jspb.Message} + * @constructor + */ +proto.routeguide.RouteSummary = function(opt_data) { + jspb.Message.initialize(this, opt_data, 0, -1, null, null); +}; +goog.inherits(proto.routeguide.RouteSummary, jspb.Message); +if (goog.DEBUG && !COMPILED) { + /** + * @public + * @override + */ + proto.routeguide.RouteSummary.displayName = 'proto.routeguide.RouteSummary'; +} + + + +if (jspb.Message.GENERATE_TO_OBJECT) { +/** + * Creates an object representation of this proto. + * Field names that are reserved in JavaScript and will be renamed to pb_name. + * Optional fields that are not set will be set to undefined. + * To access a reserved field use, foo.pb_, eg, foo.pb_default. + * For the list of reserved names please see: + * net/proto2/compiler/js/internal/generator.cc#kKeyword. + * @param {boolean=} opt_includeInstance Deprecated. whether to include the + * JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @return {!Object} + */ +proto.routeguide.Point.prototype.toObject = function(opt_includeInstance) { + return proto.routeguide.Point.toObject(opt_includeInstance, this); +}; + + +/** + * Static version of the {@see toObject} method. + * @param {boolean|undefined} includeInstance Deprecated. Whether to include + * the JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @param {!proto.routeguide.Point} msg The msg instance to transform. 
+ * @return {!Object} + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.routeguide.Point.toObject = function(includeInstance, msg) { + var f, obj = { + latitude: jspb.Message.getFieldWithDefault(msg, 1, 0), + longitude: jspb.Message.getFieldWithDefault(msg, 2, 0) + }; + + if (includeInstance) { + obj.$jspbMessageInstance = msg; + } + return obj; +}; +} + + +/** + * Deserializes binary data (in protobuf wire format). + * @param {jspb.ByteSource} bytes The bytes to deserialize. + * @return {!proto.routeguide.Point} + */ +proto.routeguide.Point.deserializeBinary = function(bytes) { + var reader = new jspb.BinaryReader(bytes); + var msg = new proto.routeguide.Point; + return proto.routeguide.Point.deserializeBinaryFromReader(msg, reader); +}; + + +/** + * Deserializes binary data (in protobuf wire format) from the + * given reader into the given message object. + * @param {!proto.routeguide.Point} msg The message object to deserialize into. + * @param {!jspb.BinaryReader} reader The BinaryReader to use. + * @return {!proto.routeguide.Point} + */ +proto.routeguide.Point.deserializeBinaryFromReader = function(msg, reader) { + while (reader.nextField()) { + if (reader.isEndGroup()) { + break; + } + var field = reader.getFieldNumber(); + switch (field) { + case 1: + var value = /** @type {number} */ (reader.readInt32()); + msg.setLatitude(value); + break; + case 2: + var value = /** @type {number} */ (reader.readInt32()); + msg.setLongitude(value); + break; + default: + reader.skipField(); + break; + } + } + return msg; +}; + + +/** + * Serializes the message to binary data (in protobuf wire format). + * @return {!Uint8Array} + */ +proto.routeguide.Point.prototype.serializeBinary = function() { + var writer = new jspb.BinaryWriter(); + proto.routeguide.Point.serializeBinaryToWriter(this, writer); + return writer.getResultBuffer(); +}; + + +/** + * Serializes the given message to binary data (in protobuf wire + * format), writing to the given BinaryWriter. + * @param {!proto.routeguide.Point} message + * @param {!jspb.BinaryWriter} writer + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.routeguide.Point.serializeBinaryToWriter = function(message, writer) { + var f = undefined; + f = message.getLatitude(); + if (f !== 0) { + writer.writeInt32( + 1, + f + ); + } + f = message.getLongitude(); + if (f !== 0) { + writer.writeInt32( + 2, + f + ); + } +}; + + +/** + * optional int32 latitude = 1; + * @return {number} + */ +proto.routeguide.Point.prototype.getLatitude = function() { + return /** @type {number} */ (jspb.Message.getFieldWithDefault(this, 1, 0)); +}; + + +/** + * @param {number} value + * @return {!proto.routeguide.Point} returns this + */ +proto.routeguide.Point.prototype.setLatitude = function(value) { + return jspb.Message.setProto3IntField(this, 1, value); +}; + + +/** + * optional int32 longitude = 2; + * @return {number} + */ +proto.routeguide.Point.prototype.getLongitude = function() { + return /** @type {number} */ (jspb.Message.getFieldWithDefault(this, 2, 0)); +}; + + +/** + * @param {number} value + * @return {!proto.routeguide.Point} returns this + */ +proto.routeguide.Point.prototype.setLongitude = function(value) { + return jspb.Message.setProto3IntField(this, 2, value); +}; + + + + + +if (jspb.Message.GENERATE_TO_OBJECT) { +/** + * Creates an object representation of this proto. + * Field names that are reserved in JavaScript and will be renamed to pb_name. 
+ * Optional fields that are not set will be set to undefined. + * To access a reserved field use, foo.pb_, eg, foo.pb_default. + * For the list of reserved names please see: + * net/proto2/compiler/js/internal/generator.cc#kKeyword. + * @param {boolean=} opt_includeInstance Deprecated. whether to include the + * JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @return {!Object} + */ +proto.routeguide.Rectangle.prototype.toObject = function(opt_includeInstance) { + return proto.routeguide.Rectangle.toObject(opt_includeInstance, this); +}; + + +/** + * Static version of the {@see toObject} method. + * @param {boolean|undefined} includeInstance Deprecated. Whether to include + * the JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @param {!proto.routeguide.Rectangle} msg The msg instance to transform. + * @return {!Object} + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.routeguide.Rectangle.toObject = function(includeInstance, msg) { + var f, obj = { + lo: (f = msg.getLo()) && proto.routeguide.Point.toObject(includeInstance, f), + hi: (f = msg.getHi()) && proto.routeguide.Point.toObject(includeInstance, f) + }; + + if (includeInstance) { + obj.$jspbMessageInstance = msg; + } + return obj; +}; +} + + +/** + * Deserializes binary data (in protobuf wire format). + * @param {jspb.ByteSource} bytes The bytes to deserialize. + * @return {!proto.routeguide.Rectangle} + */ +proto.routeguide.Rectangle.deserializeBinary = function(bytes) { + var reader = new jspb.BinaryReader(bytes); + var msg = new proto.routeguide.Rectangle; + return proto.routeguide.Rectangle.deserializeBinaryFromReader(msg, reader); +}; + + +/** + * Deserializes binary data (in protobuf wire format) from the + * given reader into the given message object. + * @param {!proto.routeguide.Rectangle} msg The message object to deserialize into. + * @param {!jspb.BinaryReader} reader The BinaryReader to use. + * @return {!proto.routeguide.Rectangle} + */ +proto.routeguide.Rectangle.deserializeBinaryFromReader = function(msg, reader) { + while (reader.nextField()) { + if (reader.isEndGroup()) { + break; + } + var field = reader.getFieldNumber(); + switch (field) { + case 1: + var value = new proto.routeguide.Point; + reader.readMessage(value,proto.routeguide.Point.deserializeBinaryFromReader); + msg.setLo(value); + break; + case 2: + var value = new proto.routeguide.Point; + reader.readMessage(value,proto.routeguide.Point.deserializeBinaryFromReader); + msg.setHi(value); + break; + default: + reader.skipField(); + break; + } + } + return msg; +}; + + +/** + * Serializes the message to binary data (in protobuf wire format). + * @return {!Uint8Array} + */ +proto.routeguide.Rectangle.prototype.serializeBinary = function() { + var writer = new jspb.BinaryWriter(); + proto.routeguide.Rectangle.serializeBinaryToWriter(this, writer); + return writer.getResultBuffer(); +}; + + +/** + * Serializes the given message to binary data (in protobuf wire + * format), writing to the given BinaryWriter. 
+ * @param {!proto.routeguide.Rectangle} message + * @param {!jspb.BinaryWriter} writer + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.routeguide.Rectangle.serializeBinaryToWriter = function(message, writer) { + var f = undefined; + f = message.getLo(); + if (f != null) { + writer.writeMessage( + 1, + f, + proto.routeguide.Point.serializeBinaryToWriter + ); + } + f = message.getHi(); + if (f != null) { + writer.writeMessage( + 2, + f, + proto.routeguide.Point.serializeBinaryToWriter + ); + } +}; + + +/** + * optional Point lo = 1; + * @return {?proto.routeguide.Point} + */ +proto.routeguide.Rectangle.prototype.getLo = function() { + return /** @type{?proto.routeguide.Point} */ ( + jspb.Message.getWrapperField(this, proto.routeguide.Point, 1)); +}; + + +/** + * @param {?proto.routeguide.Point|undefined} value + * @return {!proto.routeguide.Rectangle} returns this +*/ +proto.routeguide.Rectangle.prototype.setLo = function(value) { + return jspb.Message.setWrapperField(this, 1, value); +}; + + +/** + * Clears the message field making it undefined. + * @return {!proto.routeguide.Rectangle} returns this + */ +proto.routeguide.Rectangle.prototype.clearLo = function() { + return this.setLo(undefined); +}; + + +/** + * Returns whether this field is set. + * @return {boolean} + */ +proto.routeguide.Rectangle.prototype.hasLo = function() { + return jspb.Message.getField(this, 1) != null; +}; + + +/** + * optional Point hi = 2; + * @return {?proto.routeguide.Point} + */ +proto.routeguide.Rectangle.prototype.getHi = function() { + return /** @type{?proto.routeguide.Point} */ ( + jspb.Message.getWrapperField(this, proto.routeguide.Point, 2)); +}; + + +/** + * @param {?proto.routeguide.Point|undefined} value + * @return {!proto.routeguide.Rectangle} returns this +*/ +proto.routeguide.Rectangle.prototype.setHi = function(value) { + return jspb.Message.setWrapperField(this, 2, value); +}; + + +/** + * Clears the message field making it undefined. + * @return {!proto.routeguide.Rectangle} returns this + */ +proto.routeguide.Rectangle.prototype.clearHi = function() { + return this.setHi(undefined); +}; + + +/** + * Returns whether this field is set. + * @return {boolean} + */ +proto.routeguide.Rectangle.prototype.hasHi = function() { + return jspb.Message.getField(this, 2) != null; +}; + + + + + +if (jspb.Message.GENERATE_TO_OBJECT) { +/** + * Creates an object representation of this proto. + * Field names that are reserved in JavaScript and will be renamed to pb_name. + * Optional fields that are not set will be set to undefined. + * To access a reserved field use, foo.pb_, eg, foo.pb_default. + * For the list of reserved names please see: + * net/proto2/compiler/js/internal/generator.cc#kKeyword. + * @param {boolean=} opt_includeInstance Deprecated. whether to include the + * JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @return {!Object} + */ +proto.routeguide.Feature.prototype.toObject = function(opt_includeInstance) { + return proto.routeguide.Feature.toObject(opt_includeInstance, this); +}; + + +/** + * Static version of the {@see toObject} method. + * @param {boolean|undefined} includeInstance Deprecated. Whether to include + * the JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @param {!proto.routeguide.Feature} msg The msg instance to transform. 
+ * @return {!Object} + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.routeguide.Feature.toObject = function(includeInstance, msg) { + var f, obj = { + name: jspb.Message.getFieldWithDefault(msg, 1, ""), + location: (f = msg.getLocation()) && proto.routeguide.Point.toObject(includeInstance, f) + }; + + if (includeInstance) { + obj.$jspbMessageInstance = msg; + } + return obj; +}; +} + + +/** + * Deserializes binary data (in protobuf wire format). + * @param {jspb.ByteSource} bytes The bytes to deserialize. + * @return {!proto.routeguide.Feature} + */ +proto.routeguide.Feature.deserializeBinary = function(bytes) { + var reader = new jspb.BinaryReader(bytes); + var msg = new proto.routeguide.Feature; + return proto.routeguide.Feature.deserializeBinaryFromReader(msg, reader); +}; + + +/** + * Deserializes binary data (in protobuf wire format) from the + * given reader into the given message object. + * @param {!proto.routeguide.Feature} msg The message object to deserialize into. + * @param {!jspb.BinaryReader} reader The BinaryReader to use. + * @return {!proto.routeguide.Feature} + */ +proto.routeguide.Feature.deserializeBinaryFromReader = function(msg, reader) { + while (reader.nextField()) { + if (reader.isEndGroup()) { + break; + } + var field = reader.getFieldNumber(); + switch (field) { + case 1: + var value = /** @type {string} */ (reader.readString()); + msg.setName(value); + break; + case 2: + var value = new proto.routeguide.Point; + reader.readMessage(value,proto.routeguide.Point.deserializeBinaryFromReader); + msg.setLocation(value); + break; + default: + reader.skipField(); + break; + } + } + return msg; +}; + + +/** + * Serializes the message to binary data (in protobuf wire format). + * @return {!Uint8Array} + */ +proto.routeguide.Feature.prototype.serializeBinary = function() { + var writer = new jspb.BinaryWriter(); + proto.routeguide.Feature.serializeBinaryToWriter(this, writer); + return writer.getResultBuffer(); +}; + + +/** + * Serializes the given message to binary data (in protobuf wire + * format), writing to the given BinaryWriter. 
+ * @param {!proto.routeguide.Feature} message + * @param {!jspb.BinaryWriter} writer + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.routeguide.Feature.serializeBinaryToWriter = function(message, writer) { + var f = undefined; + f = message.getName(); + if (f.length > 0) { + writer.writeString( + 1, + f + ); + } + f = message.getLocation(); + if (f != null) { + writer.writeMessage( + 2, + f, + proto.routeguide.Point.serializeBinaryToWriter + ); + } +}; + + +/** + * optional string name = 1; + * @return {string} + */ +proto.routeguide.Feature.prototype.getName = function() { + return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 1, "")); +}; + + +/** + * @param {string} value + * @return {!proto.routeguide.Feature} returns this + */ +proto.routeguide.Feature.prototype.setName = function(value) { + return jspb.Message.setProto3StringField(this, 1, value); +}; + + +/** + * optional Point location = 2; + * @return {?proto.routeguide.Point} + */ +proto.routeguide.Feature.prototype.getLocation = function() { + return /** @type{?proto.routeguide.Point} */ ( + jspb.Message.getWrapperField(this, proto.routeguide.Point, 2)); +}; + + +/** + * @param {?proto.routeguide.Point|undefined} value + * @return {!proto.routeguide.Feature} returns this +*/ +proto.routeguide.Feature.prototype.setLocation = function(value) { + return jspb.Message.setWrapperField(this, 2, value); +}; + + +/** + * Clears the message field making it undefined. + * @return {!proto.routeguide.Feature} returns this + */ +proto.routeguide.Feature.prototype.clearLocation = function() { + return this.setLocation(undefined); +}; + + +/** + * Returns whether this field is set. + * @return {boolean} + */ +proto.routeguide.Feature.prototype.hasLocation = function() { + return jspb.Message.getField(this, 2) != null; +}; + + + + + +if (jspb.Message.GENERATE_TO_OBJECT) { +/** + * Creates an object representation of this proto. + * Field names that are reserved in JavaScript and will be renamed to pb_name. + * Optional fields that are not set will be set to undefined. + * To access a reserved field use, foo.pb_, eg, foo.pb_default. + * For the list of reserved names please see: + * net/proto2/compiler/js/internal/generator.cc#kKeyword. + * @param {boolean=} opt_includeInstance Deprecated. whether to include the + * JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @return {!Object} + */ +proto.routeguide.RouteNote.prototype.toObject = function(opt_includeInstance) { + return proto.routeguide.RouteNote.toObject(opt_includeInstance, this); +}; + + +/** + * Static version of the {@see toObject} method. + * @param {boolean|undefined} includeInstance Deprecated. Whether to include + * the JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @param {!proto.routeguide.RouteNote} msg The msg instance to transform. + * @return {!Object} + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.routeguide.RouteNote.toObject = function(includeInstance, msg) { + var f, obj = { + location: (f = msg.getLocation()) && proto.routeguide.Point.toObject(includeInstance, f), + message: jspb.Message.getFieldWithDefault(msg, 2, "") + }; + + if (includeInstance) { + obj.$jspbMessageInstance = msg; + } + return obj; +}; +} + + +/** + * Deserializes binary data (in protobuf wire format). + * @param {jspb.ByteSource} bytes The bytes to deserialize. 
+ * @return {!proto.routeguide.RouteNote} + */ +proto.routeguide.RouteNote.deserializeBinary = function(bytes) { + var reader = new jspb.BinaryReader(bytes); + var msg = new proto.routeguide.RouteNote; + return proto.routeguide.RouteNote.deserializeBinaryFromReader(msg, reader); +}; + + +/** + * Deserializes binary data (in protobuf wire format) from the + * given reader into the given message object. + * @param {!proto.routeguide.RouteNote} msg The message object to deserialize into. + * @param {!jspb.BinaryReader} reader The BinaryReader to use. + * @return {!proto.routeguide.RouteNote} + */ +proto.routeguide.RouteNote.deserializeBinaryFromReader = function(msg, reader) { + while (reader.nextField()) { + if (reader.isEndGroup()) { + break; + } + var field = reader.getFieldNumber(); + switch (field) { + case 1: + var value = new proto.routeguide.Point; + reader.readMessage(value,proto.routeguide.Point.deserializeBinaryFromReader); + msg.setLocation(value); + break; + case 2: + var value = /** @type {string} */ (reader.readString()); + msg.setMessage(value); + break; + default: + reader.skipField(); + break; + } + } + return msg; +}; + + +/** + * Serializes the message to binary data (in protobuf wire format). + * @return {!Uint8Array} + */ +proto.routeguide.RouteNote.prototype.serializeBinary = function() { + var writer = new jspb.BinaryWriter(); + proto.routeguide.RouteNote.serializeBinaryToWriter(this, writer); + return writer.getResultBuffer(); +}; + + +/** + * Serializes the given message to binary data (in protobuf wire + * format), writing to the given BinaryWriter. + * @param {!proto.routeguide.RouteNote} message + * @param {!jspb.BinaryWriter} writer + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.routeguide.RouteNote.serializeBinaryToWriter = function(message, writer) { + var f = undefined; + f = message.getLocation(); + if (f != null) { + writer.writeMessage( + 1, + f, + proto.routeguide.Point.serializeBinaryToWriter + ); + } + f = message.getMessage(); + if (f.length > 0) { + writer.writeString( + 2, + f + ); + } +}; + + +/** + * optional Point location = 1; + * @return {?proto.routeguide.Point} + */ +proto.routeguide.RouteNote.prototype.getLocation = function() { + return /** @type{?proto.routeguide.Point} */ ( + jspb.Message.getWrapperField(this, proto.routeguide.Point, 1)); +}; + + +/** + * @param {?proto.routeguide.Point|undefined} value + * @return {!proto.routeguide.RouteNote} returns this +*/ +proto.routeguide.RouteNote.prototype.setLocation = function(value) { + return jspb.Message.setWrapperField(this, 1, value); +}; + + +/** + * Clears the message field making it undefined. + * @return {!proto.routeguide.RouteNote} returns this + */ +proto.routeguide.RouteNote.prototype.clearLocation = function() { + return this.setLocation(undefined); +}; + + +/** + * Returns whether this field is set. 
+ * @return {boolean} + */ +proto.routeguide.RouteNote.prototype.hasLocation = function() { + return jspb.Message.getField(this, 1) != null; +}; + + +/** + * optional string message = 2; + * @return {string} + */ +proto.routeguide.RouteNote.prototype.getMessage = function() { + return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 2, "")); +}; + + +/** + * @param {string} value + * @return {!proto.routeguide.RouteNote} returns this + */ +proto.routeguide.RouteNote.prototype.setMessage = function(value) { + return jspb.Message.setProto3StringField(this, 2, value); +}; + + + + + +if (jspb.Message.GENERATE_TO_OBJECT) { +/** + * Creates an object representation of this proto. + * Field names that are reserved in JavaScript and will be renamed to pb_name. + * Optional fields that are not set will be set to undefined. + * To access a reserved field use, foo.pb_, eg, foo.pb_default. + * For the list of reserved names please see: + * net/proto2/compiler/js/internal/generator.cc#kKeyword. + * @param {boolean=} opt_includeInstance Deprecated. whether to include the + * JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @return {!Object} + */ +proto.routeguide.RouteSummary.prototype.toObject = function(opt_includeInstance) { + return proto.routeguide.RouteSummary.toObject(opt_includeInstance, this); +}; + + +/** + * Static version of the {@see toObject} method. + * @param {boolean|undefined} includeInstance Deprecated. Whether to include + * the JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @param {!proto.routeguide.RouteSummary} msg The msg instance to transform. + * @return {!Object} + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.routeguide.RouteSummary.toObject = function(includeInstance, msg) { + var f, obj = { + pointCount: jspb.Message.getFieldWithDefault(msg, 1, 0), + featureCount: jspb.Message.getFieldWithDefault(msg, 2, 0), + distance: jspb.Message.getFieldWithDefault(msg, 3, 0), + elapsedTime: jspb.Message.getFieldWithDefault(msg, 4, 0) + }; + + if (includeInstance) { + obj.$jspbMessageInstance = msg; + } + return obj; +}; +} + + +/** + * Deserializes binary data (in protobuf wire format). + * @param {jspb.ByteSource} bytes The bytes to deserialize. + * @return {!proto.routeguide.RouteSummary} + */ +proto.routeguide.RouteSummary.deserializeBinary = function(bytes) { + var reader = new jspb.BinaryReader(bytes); + var msg = new proto.routeguide.RouteSummary; + return proto.routeguide.RouteSummary.deserializeBinaryFromReader(msg, reader); +}; + + +/** + * Deserializes binary data (in protobuf wire format) from the + * given reader into the given message object. + * @param {!proto.routeguide.RouteSummary} msg The message object to deserialize into. + * @param {!jspb.BinaryReader} reader The BinaryReader to use. 
+ * @return {!proto.routeguide.RouteSummary} + */ +proto.routeguide.RouteSummary.deserializeBinaryFromReader = function(msg, reader) { + while (reader.nextField()) { + if (reader.isEndGroup()) { + break; + } + var field = reader.getFieldNumber(); + switch (field) { + case 1: + var value = /** @type {number} */ (reader.readInt32()); + msg.setPointCount(value); + break; + case 2: + var value = /** @type {number} */ (reader.readInt32()); + msg.setFeatureCount(value); + break; + case 3: + var value = /** @type {number} */ (reader.readInt32()); + msg.setDistance(value); + break; + case 4: + var value = /** @type {number} */ (reader.readInt32()); + msg.setElapsedTime(value); + break; + default: + reader.skipField(); + break; + } + } + return msg; +}; + + +/** + * Serializes the message to binary data (in protobuf wire format). + * @return {!Uint8Array} + */ +proto.routeguide.RouteSummary.prototype.serializeBinary = function() { + var writer = new jspb.BinaryWriter(); + proto.routeguide.RouteSummary.serializeBinaryToWriter(this, writer); + return writer.getResultBuffer(); +}; + + +/** + * Serializes the given message to binary data (in protobuf wire + * format), writing to the given BinaryWriter. + * @param {!proto.routeguide.RouteSummary} message + * @param {!jspb.BinaryWriter} writer + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.routeguide.RouteSummary.serializeBinaryToWriter = function(message, writer) { + var f = undefined; + f = message.getPointCount(); + if (f !== 0) { + writer.writeInt32( + 1, + f + ); + } + f = message.getFeatureCount(); + if (f !== 0) { + writer.writeInt32( + 2, + f + ); + } + f = message.getDistance(); + if (f !== 0) { + writer.writeInt32( + 3, + f + ); + } + f = message.getElapsedTime(); + if (f !== 0) { + writer.writeInt32( + 4, + f + ); + } +}; + + +/** + * optional int32 point_count = 1; + * @return {number} + */ +proto.routeguide.RouteSummary.prototype.getPointCount = function() { + return /** @type {number} */ (jspb.Message.getFieldWithDefault(this, 1, 0)); +}; + + +/** + * @param {number} value + * @return {!proto.routeguide.RouteSummary} returns this + */ +proto.routeguide.RouteSummary.prototype.setPointCount = function(value) { + return jspb.Message.setProto3IntField(this, 1, value); +}; + + +/** + * optional int32 feature_count = 2; + * @return {number} + */ +proto.routeguide.RouteSummary.prototype.getFeatureCount = function() { + return /** @type {number} */ (jspb.Message.getFieldWithDefault(this, 2, 0)); +}; + + +/** + * @param {number} value + * @return {!proto.routeguide.RouteSummary} returns this + */ +proto.routeguide.RouteSummary.prototype.setFeatureCount = function(value) { + return jspb.Message.setProto3IntField(this, 2, value); +}; + + +/** + * optional int32 distance = 3; + * @return {number} + */ +proto.routeguide.RouteSummary.prototype.getDistance = function() { + return /** @type {number} */ (jspb.Message.getFieldWithDefault(this, 3, 0)); +}; + + +/** + * @param {number} value + * @return {!proto.routeguide.RouteSummary} returns this + */ +proto.routeguide.RouteSummary.prototype.setDistance = function(value) { + return jspb.Message.setProto3IntField(this, 3, value); +}; + + +/** + * optional int32 elapsed_time = 4; + * @return {number} + */ +proto.routeguide.RouteSummary.prototype.getElapsedTime = function() { + return /** @type {number} */ (jspb.Message.getFieldWithDefault(this, 4, 0)); +}; + + +/** + * @param {number} value + * @return {!proto.routeguide.RouteSummary} returns this + */ 
+proto.routeguide.RouteSummary.prototype.setElapsedTime = function(value) { + return jspb.Message.setProto3IntField(this, 4, value); +}; + + +goog.object.extend(exports, proto.routeguide); diff --git a/examples/routeguide/static_codegen/route_guide_server.js b/examples/routeguide/static_codegen/route_guide_server.js new file mode 100644 index 000000000..eb1fd283d --- /dev/null +++ b/examples/routeguide/static_codegen/route_guide_server.js @@ -0,0 +1,244 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +var messages = require('./route_guide_pb'); +var services = require('./route_guide_grpc_pb'); + +var fs = require('fs'); +var parseArgs = require('minimist'); +var path = require('path'); +var _ = require('lodash'); +var grpc = require('@grpc/grpc-js'); + +var COORD_FACTOR = 1e7; + +/** + * For simplicity, a point is a record type that looks like + * {latitude: number, longitude: number}, and a feature is a record type that + * looks like {name: string, location: point}. feature objects with name==='' + * are points with no feature. + */ + +/** + * List of feature objects at points that have been requested so far. + */ +var feature_list = []; + +/** + * Get a feature object at the given point, or creates one if it does not exist. + * @param {point} point The point to check + * @return {feature} The feature object at the point. Note that an empty name + * indicates no feature + */ +function checkFeature(point) { + var feature; + // Check if there is already a feature object for the given point + for (var i = 0; i < feature_list.length; i++) { + feature = feature_list[i]; + if (feature.getLocation().getLatitude() === point.getLatitude() && + feature.getLocation().getLongitude() === point.getLongitude()) { + return feature; + } + } + var name = ''; + feature = new messages.Feature(); + feature.setName(name); + feature.setLocation(point); + return feature; +} + +/** + * getFeature request handler. Gets a request with a point, and responds with a + * feature object indicating whether there is a feature at that point. + * @param {EventEmitter} call Call object for the handler to process + * @param {function(Error, feature)} callback Response callback + */ +function getFeature(call, callback) { + callback(null, checkFeature(call.request)); +} + +/** + * listFeatures request handler. Gets a request with two points, and responds + * with a stream of all features in the bounding box defined by those points. + * @param {Writable} call Writable stream for responses with an additional + * request property for the request value. 
 */
+function listFeatures(call) {
+  var lo = call.request.getLo();
+  var hi = call.request.getHi();
+  var left = _.min([lo.getLongitude(), hi.getLongitude()]);
+  var right = _.max([lo.getLongitude(), hi.getLongitude()]);
+  var top = _.max([lo.getLatitude(), hi.getLatitude()]);
+  var bottom = _.min([lo.getLatitude(), hi.getLatitude()]);
+  // For each feature, check if it is in the given bounding box
+  _.each(feature_list, function(feature) {
+    if (feature.getName() === '') {
+      return;
+    }
+    if (feature.getLocation().getLongitude() >= left &&
+        feature.getLocation().getLongitude() <= right &&
+        feature.getLocation().getLatitude() >= bottom &&
+        feature.getLocation().getLatitude() <= top) {
+      call.write(feature);
+    }
+  });
+  call.end();
+}
+
+/**
+ * Calculate the distance between two points using the "haversine" formula.
+ * The formula is based on http://mathforum.org/library/drmath/view/51879.html.
+ * @param start The starting point
+ * @param end The end point
+ * @return The distance between the points in meters
+ */
+function getDistance(start, end) {
+  function toRadians(num) {
+    return num * Math.PI / 180;
+  }
+  var R = 6371000; // earth radius in metres
+  var lat1 = toRadians(start.getLatitude() / COORD_FACTOR);
+  var lat2 = toRadians(end.getLatitude() / COORD_FACTOR);
+  var lon1 = toRadians(start.getLongitude() / COORD_FACTOR);
+  var lon2 = toRadians(end.getLongitude() / COORD_FACTOR);
+
+  var deltalat = lat2-lat1;
+  var deltalon = lon2-lon1;
+  var a = Math.sin(deltalat/2) * Math.sin(deltalat/2) +
+          Math.cos(lat1) * Math.cos(lat2) *
+          Math.sin(deltalon/2) * Math.sin(deltalon/2);
+  var c = 2 * Math.atan2(Math.sqrt(a), Math.sqrt(1-a));
+  return R * c;
+}
+
+/**
+ * recordRoute handler. Gets a stream of points, and responds with statistics
+ * about the "trip": number of points, number of known features visited, total
+ * distance traveled, and total time spent.
+ * @param {Readable} call The request point stream.
+ * @param {function(Error, routeSummary)} callback The callback to pass the
+ *     response to
+ */
+function recordRoute(call, callback) {
+  var point_count = 0;
+  var feature_count = 0;
+  var distance = 0;
+  var previous = null;
+  // Start a timer
+  var start_time = process.hrtime();
+  call.on('data', function(point) {
+    point_count += 1;
+    if (checkFeature(point).getName() !== '') {
+      feature_count += 1;
+    }
+    /* For each point after the first, add the incremental distance from the
+     * previous point to the total distance value */
+    if (previous != null) {
+      distance += getDistance(previous, point);
+    }
+    previous = point;
+  });
+  call.on('end', function() {
+    var summary = new messages.RouteSummary();
+    summary.setPointCount(point_count);
+    summary.setFeatureCount(feature_count);
+    // Cast the distance to an integer
+    summary.setDistance(distance|0);
+    // End the timer
+    summary.setElapsedTime(process.hrtime(start_time)[0]);
+    callback(null, summary);
+  });
+}
+
+var route_notes = {};
+
+/**
+ * Turn the point into a dictionary key.
+ * @param {point} point The point to use
+ * @return {string} The key for an object
+ */
+function pointKey(point) {
+  return point.getLatitude() + ' ' + point.getLongitude();
+}
+
+/**
+ * routeChat handler. Receives a stream of message/location pairs, and responds
+ * with a stream of all previous messages at each of those locations.
+ * @param {Duplex} call The stream for incoming and outgoing messages + */ +function routeChat(call) { + call.on('data', function(note) { + var key = pointKey(note.getLocation()); + /* For each note sent, respond with all previous notes that correspond to + * the same point */ + if (route_notes.hasOwnProperty(key)) { + _.each(route_notes[key], function(note) { + call.write(note); + }); + } else { + route_notes[key] = []; + } + // Then add the new note to the list + route_notes[key].push(note); + }); + call.on('end', function() { + call.end(); + }); +} + +/** + * Get a new server with the handler functions in this file bound to the methods + * it serves. + * @return {Server} The new server object + */ +function getServer() { + var server = new grpc.Server(); + server.addService(services.RouteGuideService, { + getFeature: getFeature, + listFeatures: listFeatures, + recordRoute: recordRoute, + routeChat: routeChat + }); + return server; +} + +if (require.main === module) { + // If this is run as a script, start a server on an unused port + var routeServer = getServer(); + routeServer.bindAsync('0.0.0.0:50051', grpc.ServerCredentials.createInsecure(), () => { + var argv = parseArgs(process.argv, { + string: 'db_path' + }); + fs.readFile(path.resolve(argv.db_path), function(err, data) { + if (err) throw err; + // Transform the loaded features to Feature objects + feature_list = _.map(JSON.parse(data), function(value) { + var feature = new messages.Feature(); + feature.setName(value.name); + var location = new messages.Point(); + location.setLatitude(value.location.latitude); + location.setLongitude(value.location.longitude); + feature.setLocation(location); + return feature; + }); + routeServer.start(); + }); + }); +} + +exports.getServer = getServer; diff --git a/examples/xds/greeter_client.js b/examples/xds/greeter_client.js new file mode 100644 index 000000000..17203742e --- /dev/null +++ b/examples/xds/greeter_client.js @@ -0,0 +1,62 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +var PROTO_PATH = __dirname + '/../protos/helloworld.proto'; + +var parseArgs = require('minimist'); +var grpc = require('@grpc/grpc-js'); +var grpc_xds = require('@grpc/grpc-js-xds'); +grpc_xds.register(); + +var protoLoader = require('@grpc/proto-loader'); +var packageDefinition = protoLoader.loadSync( + PROTO_PATH, + {keepCase: true, + longs: String, + enums: String, + defaults: true, + oneofs: true + }); +var hello_proto = grpc.loadPackageDefinition(packageDefinition).helloworld; + +function main() { + var argv = parseArgs(process.argv.slice(2), { + string: 'target' + }); + var target; + if (argv.target) { + target = argv.target; + } else { + target = 'localhost:50051'; + } + var client = new hello_proto.Greeter(target, + grpc.credentials.createInsecure()); + var user; + if (argv._.length > 0) { + user = argv._[0]; + } else { + user = 'world'; + } + client.sayHello({name: user}, function(err, response) { + if (err) throw err; + console.log('Greeting:', response.message); + client.close(); + }); +} + +main(); diff --git a/gulpfile.ts b/gulpfile.ts index 7ac4e9a0b..2c2c2e374 100644 --- a/gulpfile.ts +++ b/gulpfile.ts @@ -19,10 +19,11 @@ import * as gulp from 'gulp'; import * as healthCheck from './packages/grpc-health-check/gulpfile'; import * as jsCore from './packages/grpc-js/gulpfile'; import * as jsXds from './packages/grpc-js-xds/gulpfile'; +import * as reflection from './packages/grpc-reflection/gulpfile'; import * as protobuf from './packages/proto-loader/gulpfile'; import * as internalTest from './test/gulpfile'; -const installAll = gulp.series(jsCore.install, healthCheck.install, protobuf.install, internalTest.install, jsXds.install); +const installAll = gulp.series(jsCore.install, healthCheck.install, protobuf.install, internalTest.install, jsXds.install, reflection.install); const lint = gulp.parallel(jsCore.lint); @@ -40,7 +41,7 @@ const nativeTestOnly = gulp.parallel(healthCheck.test); const nativeTest = gulp.series(build, nativeTestOnly); -const testOnly = gulp.parallel(jsCore.test, nativeTestOnly, protobuf.test, jsXds.test); +const testOnly = gulp.parallel(jsCore.test, nativeTestOnly, protobuf.test, jsXds.test, reflection.test); const test = gulp.series(build, testOnly, internalTest.test); diff --git a/package.json b/package.json index 70a15fbbf..a5733f377 100644 --- a/package.json +++ b/package.json @@ -20,14 +20,13 @@ "del": "^3.0.0", "execa": "^0.8.0", "gulp": "^4.0.1", - "gulp-jsdoc3": "^1.0.1", "gulp-jshint": "^2.0.4", "gulp-mocha": "^4.3.1", "gulp-sourcemaps": "^2.6.1", "gulp-tslint": "^8.1.1", "gulp-typescript": "^3.2.2", "gulp-util": "^3.0.8", - "jsdoc": "^3.3.2", + "jsdoc": "^4.0.3", "jshint": "^2.9.5", "make-dir": "^1.1.0", "merge2": "^1.1.0", @@ -54,7 +53,8 @@ "include": [ "packages/grpc-health-check/health.js", "packages/grpc-js/build/src/*", - "packages/proto-loader/build/src/*" + "packages/proto-loader/build/src/*", + "packages/grpc-reflection/build/src/*" ], "cache": true, "all": true diff --git a/packages/grpc-health-check/README.md b/packages/grpc-health-check/README.md index 62a88347f..659dab140 100644 --- a/packages/grpc-health-check/README.md +++ b/packages/grpc-health-check/README.md @@ -4,11 +4,7 @@ Health check client and service for use with gRPC-node. ## Background -This package exports both a client and server that adhere to the [gRPC Health Checking Protocol](https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - -By using this package, clients and servers can rely on common proto and service definitions. 
This means:
-- Clients can use the generated stubs to health check _any_ server that adheres to the protocol.
-- Servers do not reimplement common logic for publishing health statuses.
+This package provides an implementation of the [gRPC Health Checking Protocol](https://github.com/grpc/grpc/blob/master/doc/health-checking.md) service, as described in [gRFC L106](https://github.com/grpc/proposal/blob/master/L106-node-heath-check-library.md).

## Installation
@@ -22,33 +18,39 @@ npm install grpc-health-check

### Server

-Any gRPC-node server can use `grpc-health-check` to adhere to the gRPC Health Checking Protocol.
+Any gRPC-node server can use `grpc-health-check` to adhere to the gRPC Health Checking Protocol. The following shows how this package can be added to a pre-existing gRPC server.

-```javascript 1.8
+```typescript
// Import package
-let health = require('grpc-health-check');
+import { HealthImplementation, ServingStatusMap } from 'grpc-health-check';

// Define service status map. Key is the service name, value is the corresponding status.
-// By convention, the empty string "" key represents that status of the entire server.
+// By convention, the empty string '' key represents the status of the entire server.
const statusMap = {
-  "ServiceFoo": proto.grpc.health.v1.HealthCheckResponse.ServingStatus.SERVING,
-  "ServiceBar": proto.grpc.health.v1.HealthCheckResponse.ServingStatus.NOT_SERVING,
-  "": proto.grpc.health.v1.HealthCheckResponse.ServingStatus.NOT_SERVING,
+  'ServiceFoo': 'SERVING',
+  'ServiceBar': 'NOT_SERVING',
+  '': 'NOT_SERVING',
};

// Construct the service implementation
-let healthImpl = new health.Implementation(statusMap);
+const healthImpl = new HealthImplementation(statusMap);
+
+healthImpl.addToServer(server);

-// Add the service and implementation to your pre-existing gRPC-node server
-server.addService(health.service, healthImpl);
+// When ServiceBar comes up
+healthImpl.setStatus('ServiceBar', 'SERVING');
```

Congrats! Your server now allows any client to run a health check against it.

### Client

-Any gRPC-node client can use `grpc-health-check` to run health checks against other servers that follow the protocol.
+Any gRPC-node client can use the `service` object exported by `grpc-health-check` to generate clients that can make health check requests, as shown in the sketch below.
+
+### Command Line Usage
+
+The absolute path to `health.proto` can be obtained on the command line with `node -p 'require("grpc-health-check").protoPath'`.
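+
+For example, the same `protoPath` property can be used programmatically to build a health check client with `@grpc/proto-loader` and `@grpc/grpc-js`. The snippet below is a minimal sketch rather than a complete program: the target address `localhost:50051` and the insecure credentials are placeholders, and the server is assumed to expose the health service (for example via `HealthImplementation` as shown above).
+
+```javascript
+const grpc = require('@grpc/grpc-js');
+const protoLoader = require('@grpc/proto-loader');
+const healthCheck = require('grpc-health-check');
+
+// Load the health.proto file that ships with this package.
+const packageDefinition = protoLoader.loadSync(healthCheck.protoPath, {
+  keepCase: true,
+  longs: String,
+  enums: String,
+  defaults: true,
+  oneofs: true
+});
+const healthV1 = grpc.loadPackageDefinition(packageDefinition).grpc.health.v1;
+
+// Placeholder target and credentials; adjust for your deployment.
+const client = new healthV1.Health('localhost:50051', grpc.credentials.createInsecure());
+
+// An empty service name asks for the status of the server as a whole.
+client.check({ service: '' }, (error, response) => {
+  if (error) {
+    console.error('Health check failed:', error.message);
+  } else {
+    console.log('Server status:', response.status); // e.g. 'SERVING'
+  }
+});
+```
+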
## Contributing diff --git a/packages/grpc-health-check/gulpfile.ts b/packages/grpc-health-check/gulpfile.ts index 0ddaa257e..f47087b14 100644 --- a/packages/grpc-health-check/gulpfile.ts +++ b/packages/grpc-health-check/gulpfile.ts @@ -19,22 +19,32 @@ import * as gulp from 'gulp'; import * as mocha from 'gulp-mocha'; import * as execa from 'execa'; import * as path from 'path'; -import * as del from 'del'; -import {linkSync} from '../../util'; const healthCheckDir = __dirname; -const baseDir = path.resolve(healthCheckDir, '..', '..'); -const testDir = path.resolve(healthCheckDir, 'test'); +const outDir = path.resolve(healthCheckDir, 'build'); -const runInstall = () => execa('npm', ['install', '--unsafe-perm'], {cwd: healthCheckDir, stdio: 'inherit'}); +const execNpmVerb = (verb: string, ...args: string[]) => + execa('npm', [verb, ...args], {cwd: healthCheckDir, stdio: 'inherit'}); +const execNpmCommand = execNpmVerb.bind(null, 'run'); -const runRebuild = () => execa('npm', ['rebuild', '--unsafe-perm'], {cwd: healthCheckDir, stdio: 'inherit'}); +const install = () => execNpmVerb('install', '--unsafe-perm'); -const install = gulp.series(runInstall, runRebuild); +/** + * Transpiles TypeScript files in src/ to JavaScript according to the settings + * found in tsconfig.json. + */ +const compile = () => execNpmCommand('compile'); + +const runTests = () => { + return gulp.src(`${outDir}/test/**/*.js`) + .pipe(mocha({reporter: 'mocha-jenkins-reporter', + require: ['ts-node/register']})); +}; -const test = () => gulp.src(`${testDir}/*.js`).pipe(mocha({reporter: 'mocha-jenkins-reporter'})); +const test = gulp.series(install, runTests); export { install, + compile, test -} \ No newline at end of file +} diff --git a/packages/grpc-health-check/health.js b/packages/grpc-health-check/health.js deleted file mode 100644 index cfa9c8348..000000000 --- a/packages/grpc-health-check/health.js +++ /dev/null @@ -1,55 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -'use strict'; - -var grpc = require('grpc'); - -var _get = require('lodash.get'); -var _clone = require('lodash.clone') - -var health_messages = require('./v1/health_pb'); -var health_service = require('./v1/health_grpc_pb'); - -function HealthImplementation(statusMap) { - this.statusMap = _clone(statusMap); -} - -HealthImplementation.prototype.setStatus = function(service, status) { - this.statusMap[service] = status; -}; - -HealthImplementation.prototype.check = function(call, callback){ - var service = call.request.getService(); - var status = _get(this.statusMap, service, null); - if (status === null) { - // TODO(murgatroid99): Do this without an explicit reference to grpc. 
- callback({code:grpc.status.NOT_FOUND}); - } else { - var response = new health_messages.HealthCheckResponse(); - response.setStatus(status); - callback(null, response); - } -}; - -module.exports = { - Client: health_service.HealthClient, - messages: health_messages, - service: health_service.HealthService, - Implementation: HealthImplementation -}; diff --git a/packages/grpc-health-check/package.json b/packages/grpc-health-check/package.json index e9b836346..ecdaea579 100644 --- a/packages/grpc-health-check/package.json +++ b/packages/grpc-health-check/package.json @@ -1,6 +1,6 @@ { "name": "grpc-health-check", - "version": "1.8.0", + "version": "2.0.0", "author": "Google Inc.", "description": "Health check client and service for use with gRPC-node", "repository": { @@ -14,18 +14,27 @@ "email": "mlumish@google.com" } ], + "scripts": { + "compile": "tsc -p .", + "prepare": "npm run generate-types && npm run compile", + "generate-types": "proto-loader-gen-types --keepCase --longs String --enums String --defaults --oneofs --includeComments --includeDirs proto/ -O src/generated health/v1/health.proto", + "generate-test-types": "proto-loader-gen-types --keepCase --longs String --enums String --defaults --oneofs --includeComments --includeDirs proto/ -O test/generated --grpcLib=@grpc/grpc-js health/v1/health.proto" + }, "dependencies": { - "google-protobuf": "^3.4.0", - "grpc": "^1.6.0", - "lodash.clone": "^4.5.0", - "lodash.get": "^4.4.2" + "@grpc/proto-loader": "^0.7.10" }, "files": [ "LICENSE", "README.md", - "health.js", - "v1" + "src", + "build", + "proto" ], - "main": "health.js", - "license": "Apache-2.0" + "main": "build/src/health.js", + "types": "build/src/health.d.ts", + "license": "Apache-2.0", + "devDependencies": { + "@grpc/grpc-js": "file:../grpc-js", + "typescript": "^5.2.2" + } } diff --git a/packages/grpc-health-check/proto/health/v1/health.proto b/packages/grpc-health-check/proto/health/v1/health.proto new file mode 100644 index 000000000..13b03f567 --- /dev/null +++ b/packages/grpc-health-check/proto/health/v1/health.proto @@ -0,0 +1,73 @@ +// Copyright 2015 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto + +syntax = "proto3"; + +package grpc.health.v1; + +option csharp_namespace = "Grpc.Health.V1"; +option go_package = "google.golang.org/grpc/health/grpc_health_v1"; +option java_multiple_files = true; +option java_outer_classname = "HealthProto"; +option java_package = "io.grpc.health.v1"; + +message HealthCheckRequest { + string service = 1; +} + +message HealthCheckResponse { + enum ServingStatus { + UNKNOWN = 0; + SERVING = 1; + NOT_SERVING = 2; + SERVICE_UNKNOWN = 3; // Used only by the Watch method. + } + ServingStatus status = 1; +} + +// Health is gRPC's mechanism for checking whether a server is able to handle +// RPCs. 
Its semantics are documented in +// https://github.com/grpc/grpc/blob/master/doc/health-checking.md. +service Health { + // Check gets the health of the specified service. If the requested service + // is unknown, the call will fail with status NOT_FOUND. If the caller does + // not specify a service name, the server should respond with its overall + // health status. + // + // Clients should set a deadline when calling Check, and can declare the + // server unhealthy if they do not receive a timely response. + // + // Check implementations should be idempotent and side effect free. + rpc Check(HealthCheckRequest) returns (HealthCheckResponse); + + // Performs a watch for the serving status of the requested service. + // The server will immediately send back a message indicating the current + // serving status. It will then subsequently send a new message whenever + // the service's serving status changes. + // + // If the requested service is unknown when the call is received, the + // server will send a message setting the serving status to + // SERVICE_UNKNOWN but will *not* terminate the call. If at some + // future point, the serving status of the service becomes known, the + // server will send a new message with the service's serving status. + // + // If the call terminates with status UNIMPLEMENTED, then clients + // should assume this method is not supported and should not retry the + // call. If the call terminates with any other status (including OK), + // clients should retry the call with appropriate exponential backoff. + rpc Watch(HealthCheckRequest) returns (stream HealthCheckResponse); +} diff --git a/packages/grpc-health-check/src/generated/grpc/health/v1/Health.ts b/packages/grpc-health-check/src/generated/grpc/health/v1/Health.ts new file mode 100644 index 000000000..a308498f4 --- /dev/null +++ b/packages/grpc-health-check/src/generated/grpc/health/v1/Health.ts @@ -0,0 +1,10 @@ +// Original file: proto/health/v1/health.proto + +import type { MethodDefinition } from '@grpc/proto-loader' +import type { HealthCheckRequest as _grpc_health_v1_HealthCheckRequest, HealthCheckRequest__Output as _grpc_health_v1_HealthCheckRequest__Output } from '../../../grpc/health/v1/HealthCheckRequest'; +import type { HealthCheckResponse as _grpc_health_v1_HealthCheckResponse, HealthCheckResponse__Output as _grpc_health_v1_HealthCheckResponse__Output } from '../../../grpc/health/v1/HealthCheckResponse'; + +export interface HealthDefinition { + Check: MethodDefinition<_grpc_health_v1_HealthCheckRequest, _grpc_health_v1_HealthCheckResponse, _grpc_health_v1_HealthCheckRequest__Output, _grpc_health_v1_HealthCheckResponse__Output> + Watch: MethodDefinition<_grpc_health_v1_HealthCheckRequest, _grpc_health_v1_HealthCheckResponse, _grpc_health_v1_HealthCheckRequest__Output, _grpc_health_v1_HealthCheckResponse__Output> +} diff --git a/packages/grpc-health-check/src/generated/grpc/health/v1/HealthCheckRequest.ts b/packages/grpc-health-check/src/generated/grpc/health/v1/HealthCheckRequest.ts new file mode 100644 index 000000000..71ae9df4e --- /dev/null +++ b/packages/grpc-health-check/src/generated/grpc/health/v1/HealthCheckRequest.ts @@ -0,0 +1,10 @@ +// Original file: proto/health/v1/health.proto + + +export interface HealthCheckRequest { + 'service'?: (string); +} + +export interface HealthCheckRequest__Output { + 'service': (string); +} diff --git a/packages/grpc-health-check/src/generated/grpc/health/v1/HealthCheckResponse.ts 
b/packages/grpc-health-check/src/generated/grpc/health/v1/HealthCheckResponse.ts new file mode 100644 index 000000000..ee4f375ae --- /dev/null +++ b/packages/grpc-health-check/src/generated/grpc/health/v1/HealthCheckResponse.ts @@ -0,0 +1,37 @@ +// Original file: proto/health/v1/health.proto + + +// Original file: proto/health/v1/health.proto + +export const _grpc_health_v1_HealthCheckResponse_ServingStatus = { + UNKNOWN: 'UNKNOWN', + SERVING: 'SERVING', + NOT_SERVING: 'NOT_SERVING', + /** + * Used only by the Watch method. + */ + SERVICE_UNKNOWN: 'SERVICE_UNKNOWN', +} as const; + +export type _grpc_health_v1_HealthCheckResponse_ServingStatus = + | 'UNKNOWN' + | 0 + | 'SERVING' + | 1 + | 'NOT_SERVING' + | 2 + /** + * Used only by the Watch method. + */ + | 'SERVICE_UNKNOWN' + | 3 + +export type _grpc_health_v1_HealthCheckResponse_ServingStatus__Output = typeof _grpc_health_v1_HealthCheckResponse_ServingStatus[keyof typeof _grpc_health_v1_HealthCheckResponse_ServingStatus] + +export interface HealthCheckResponse { + 'status'?: (_grpc_health_v1_HealthCheckResponse_ServingStatus); +} + +export interface HealthCheckResponse__Output { + 'status': (_grpc_health_v1_HealthCheckResponse_ServingStatus__Output); +} diff --git a/packages/grpc-health-check/src/health.ts b/packages/grpc-health-check/src/health.ts new file mode 100644 index 000000000..86ca1af0d --- /dev/null +++ b/packages/grpc-health-check/src/health.ts @@ -0,0 +1,112 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import * as path from 'path'; +import { loadSync, ServiceDefinition } from '@grpc/proto-loader'; +import { HealthCheckRequest__Output } from './generated/grpc/health/v1/HealthCheckRequest'; +import { HealthCheckResponse } from './generated/grpc/health/v1/HealthCheckResponse'; +import { sendUnaryData, Server, ServerUnaryCall, ServerWritableStream } from './server-type'; + +const loadedProto = loadSync('health/v1/health.proto', { + keepCase: true, + longs: String, + enums: String, + defaults: true, + oneofs: true, + includeDirs: [`${__dirname}/../../proto`], +}); + +export const service = loadedProto['grpc.health.v1.Health'] as ServiceDefinition; + +const GRPC_STATUS_NOT_FOUND = 5; + +export type ServingStatus = 'UNKNOWN' | 'SERVING' | 'NOT_SERVING'; + +export interface ServingStatusMap { + [serviceName: string]: ServingStatus; +} + +interface StatusWatcher { + (status: ServingStatus): void; +} + +export class HealthImplementation { + private statusMap: Map<string, ServingStatus> = new Map(); + private watchers: Map<string, Set<StatusWatcher>> = new Map(); + constructor(initialStatusMap?: ServingStatusMap) { + if (initialStatusMap) { + for (const [serviceName, status] of Object.entries(initialStatusMap)) { + this.statusMap.set(serviceName, status); + } + } + } + + setStatus(service: string, status: ServingStatus) { + this.statusMap.set(service, status); + for (const watcher of this.watchers.get(service) ??
[]) { + watcher(status); + } + } + + private addWatcher(service: string, watcher: StatusWatcher) { + const existingWatcherSet = this.watchers.get(service); + if (existingWatcherSet) { + existingWatcherSet.add(watcher); + } else { + const newWatcherSet = new Set<StatusWatcher>(); + newWatcherSet.add(watcher); + this.watchers.set(service, newWatcherSet); + } + } + + private removeWatcher(service: string, watcher: StatusWatcher) { + this.watchers.get(service)?.delete(watcher); + } + + addToServer(server: Server) { + server.addService(service, { + check: (call: ServerUnaryCall<HealthCheckRequest__Output, HealthCheckResponse>, callback: sendUnaryData<HealthCheckResponse>) => { + const serviceName = call.request.service; + const status = this.statusMap.get(serviceName); + if (status) { + callback(null, {status: status}); + } else { + callback({code: GRPC_STATUS_NOT_FOUND, details: `Health status unknown for service ${serviceName}`}); + } + }, + watch: (call: ServerWritableStream<HealthCheckRequest__Output, HealthCheckResponse>) => { + const serviceName = call.request.service; + const statusWatcher = (status: ServingStatus) => { + call.write({status: status}); + }; + this.addWatcher(serviceName, statusWatcher); + call.on('cancelled', () => { + this.removeWatcher(serviceName, statusWatcher); + }); + const currentStatus = this.statusMap.get(serviceName); + if (currentStatus) { + call.write({status: currentStatus}); + } else { + call.write({status: 'SERVICE_UNKNOWN'}); + } + } + }); + } +} + +export const protoPath = path.resolve(__dirname, '../../proto/health/v1/health.proto'); diff --git a/packages/grpc-health-check/src/object-stream.ts b/packages/grpc-health-check/src/object-stream.ts new file mode 100644 index 000000000..2f70cfa7e --- /dev/null +++ b/packages/grpc-health-check/src/object-stream.ts @@ -0,0 +1,75 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import { Readable, Writable } from 'stream'; + +interface EmitterAugmentation1<Name extends string | symbol, Arg> { + addListener(event: Name, listener: (arg1: Arg) => void): this; + emit(event: Name, arg1: Arg): boolean; + on(event: Name, listener: (arg1: Arg) => void): this; + once(event: Name, listener: (arg1: Arg) => void): this; + prependListener(event: Name, listener: (arg1: Arg) => void): this; + prependOnceListener(event: Name, listener: (arg1: Arg) => void): this; + removeListener(event: Name, listener: (arg1: Arg) => void): this; +} + +/* eslint-disable @typescript-eslint/no-explicit-any */ + +export type WriteCallback = (error: Error | null | undefined) => void; + +export interface IntermediateObjectReadable<T> extends Readable { + read(size?: number): any & T; +} + +export type ObjectReadable<T> = { + read(size?: number): T; +} & EmitterAugmentation1<'data', T> & + IntermediateObjectReadable<T>; + +export interface IntermediateObjectWritable<T> extends Writable { + _write(chunk: any & T, encoding: string, callback: Function): void; + write(chunk: any & T, cb?: WriteCallback): boolean; + write(chunk: any & T, encoding?: any, cb?: WriteCallback): boolean; + setDefaultEncoding(encoding: string): this; + end(): ReturnType<Writable['end']> extends Writable ?
this : void; + end( + chunk: any & T, + cb?: Function + ): ReturnType<Writable['end']> extends Writable ? this : void; + end( + chunk: any & T, + encoding?: any, + cb?: Function + ): ReturnType<Writable['end']> extends Writable ? this : void; +} + +export interface ObjectWritable<T> extends IntermediateObjectWritable<T> { + _write(chunk: T, encoding: string, callback: Function): void; + write(chunk: T, cb?: Function): boolean; + write(chunk: T, encoding?: any, cb?: Function): boolean; + setDefaultEncoding(encoding: string): this; + end(): ReturnType<Writable['end']> extends Writable ? this : void; + end( + chunk: T, + cb?: Function + ): ReturnType<Writable['end']> extends Writable ? this : void; + end( + chunk: T, + encoding?: any, + cb?: Function + ): ReturnType<Writable['end']> extends Writable ? this : void; +} diff --git a/packages/grpc-health-check/src/server-type.ts b/packages/grpc-health-check/src/server-type.ts new file mode 100644 index 000000000..f07704e87 --- /dev/null +++ b/packages/grpc-health-check/src/server-type.ts @@ -0,0 +1,103 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import { ServiceDefinition } from '@grpc/proto-loader'; +import { ObjectReadable, ObjectWritable } from './object-stream'; +import { EventEmitter } from 'events'; + +type Metadata = any; + +interface StatusObject { + code: number; + details: string; + metadata: Metadata; +} + +type Deadline = Date | number; + +type ServerStatusResponse = Partial<StatusObject>; + +type ServerErrorResponse = ServerStatusResponse & Error; + +type ServerSurfaceCall = { + cancelled: boolean; + readonly metadata: Metadata; + getPeer(): string; + sendMetadata(responseMetadata: Metadata): void; + getDeadline(): Deadline; + getPath(): string; +} & EventEmitter; + +export type ServerUnaryCall<RequestType, ResponseType> = ServerSurfaceCall & { + request: RequestType; +}; +type ServerReadableStream<RequestType, ResponseType> = + ServerSurfaceCall & ObjectReadable<RequestType>; +export type ServerWritableStream<RequestType, ResponseType> = + ServerSurfaceCall & + ObjectWritable<ResponseType> & { + request: RequestType; + end: (metadata?: Metadata) => void; + }; +type ServerDuplexStream<RequestType, ResponseType> = ServerSurfaceCall & + ObjectReadable<RequestType> & + ObjectWritable<ResponseType> & { end: (metadata?: Metadata) => void }; + +// Unary response callback signature. +export type sendUnaryData<ResponseType> = ( + error: ServerErrorResponse | ServerStatusResponse | null, + value?: ResponseType | null, + trailer?: Metadata, + flags?: number +) => void; + +// User provided handler for unary calls. +type handleUnaryCall<RequestType, ResponseType> = ( + call: ServerUnaryCall<RequestType, ResponseType>, + callback: sendUnaryData<ResponseType> +) => void; + +// User provided handler for client streaming calls. +type handleClientStreamingCall<RequestType, ResponseType> = ( + call: ServerReadableStream<RequestType, ResponseType>, + callback: sendUnaryData<ResponseType> +) => void; + +// User provided handler for server streaming calls. +type handleServerStreamingCall<RequestType, ResponseType> = ( + call: ServerWritableStream<RequestType, ResponseType> +) => void; + +// User provided handler for bidirectional streaming calls.
+type handleBidiStreamingCall<RequestType, ResponseType> = ( + call: ServerDuplexStream<RequestType, ResponseType> +) => void; + +export type HandleCall<RequestType, ResponseType> = + | handleUnaryCall<RequestType, ResponseType> + | handleClientStreamingCall<RequestType, ResponseType> + | handleServerStreamingCall<RequestType, ResponseType> + | handleBidiStreamingCall<RequestType, ResponseType>; + +export type UntypedHandleCall = HandleCall<any, any>; +export interface UntypedServiceImplementation { + [name: string]: UntypedHandleCall; +} + +export interface Server { + addService(service: ServiceDefinition, implementation: UntypedServiceImplementation): void; +} diff --git a/packages/grpc-health-check/test/generated/grpc/health/v1/Health.ts b/packages/grpc-health-check/test/generated/grpc/health/v1/Health.ts new file mode 100644 index 000000000..320958e3c --- /dev/null +++ b/packages/grpc-health-check/test/generated/grpc/health/v1/Health.ts @@ -0,0 +1,129 @@ +// Original file: proto/health/v1/health.proto + +import type * as grpc from '@grpc/grpc-js' +import type { MethodDefinition } from '@grpc/proto-loader' +import type { HealthCheckRequest as _grpc_health_v1_HealthCheckRequest, HealthCheckRequest__Output as _grpc_health_v1_HealthCheckRequest__Output } from '../../../grpc/health/v1/HealthCheckRequest'; +import type { HealthCheckResponse as _grpc_health_v1_HealthCheckResponse, HealthCheckResponse__Output as _grpc_health_v1_HealthCheckResponse__Output } from '../../../grpc/health/v1/HealthCheckResponse'; + +/** + * Health is gRPC's mechanism for checking whether a server is able to handle + * RPCs. Its semantics are documented in + * https://github.com/grpc/grpc/blob/master/doc/health-checking.md. + */ +export interface HealthClient extends grpc.Client { + /** + * Check gets the health of the specified service. If the requested service + * is unknown, the call will fail with status NOT_FOUND. If the caller does + * not specify a service name, the server should respond with its overall + * health status. + * + * Clients should set a deadline when calling Check, and can declare the + * server unhealthy if they do not receive a timely response. + * + * Check implementations should be idempotent and side effect free. + */ + Check(argument: _grpc_health_v1_HealthCheckRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_health_v1_HealthCheckResponse__Output>): grpc.ClientUnaryCall; + Check(argument: _grpc_health_v1_HealthCheckRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_health_v1_HealthCheckResponse__Output>): grpc.ClientUnaryCall; + Check(argument: _grpc_health_v1_HealthCheckRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_health_v1_HealthCheckResponse__Output>): grpc.ClientUnaryCall; + Check(argument: _grpc_health_v1_HealthCheckRequest, callback: grpc.requestCallback<_grpc_health_v1_HealthCheckResponse__Output>): grpc.ClientUnaryCall; + /** + * Check gets the health of the specified service. If the requested service + * is unknown, the call will fail with status NOT_FOUND. If the caller does + * not specify a service name, the server should respond with its overall + * health status. + * + * Clients should set a deadline when calling Check, and can declare the + * server unhealthy if they do not receive a timely response. + * + * Check implementations should be idempotent and side effect free.
+ */ + check(argument: _grpc_health_v1_HealthCheckRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_health_v1_HealthCheckResponse__Output>): grpc.ClientUnaryCall; + check(argument: _grpc_health_v1_HealthCheckRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_health_v1_HealthCheckResponse__Output>): grpc.ClientUnaryCall; + check(argument: _grpc_health_v1_HealthCheckRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_health_v1_HealthCheckResponse__Output>): grpc.ClientUnaryCall; + check(argument: _grpc_health_v1_HealthCheckRequest, callback: grpc.requestCallback<_grpc_health_v1_HealthCheckResponse__Output>): grpc.ClientUnaryCall; + + /** + * Performs a watch for the serving status of the requested service. + * The server will immediately send back a message indicating the current + * serving status. It will then subsequently send a new message whenever + * the service's serving status changes. + * + * If the requested service is unknown when the call is received, the + * server will send a message setting the serving status to + * SERVICE_UNKNOWN but will *not* terminate the call. If at some + * future point, the serving status of the service becomes known, the + * server will send a new message with the service's serving status. + * + * If the call terminates with status UNIMPLEMENTED, then clients + * should assume this method is not supported and should not retry the + * call. If the call terminates with any other status (including OK), + * clients should retry the call with appropriate exponential backoff. + */ + Watch(argument: _grpc_health_v1_HealthCheckRequest, metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientReadableStream<_grpc_health_v1_HealthCheckResponse__Output>; + Watch(argument: _grpc_health_v1_HealthCheckRequest, options?: grpc.CallOptions): grpc.ClientReadableStream<_grpc_health_v1_HealthCheckResponse__Output>; + /** + * Performs a watch for the serving status of the requested service. + * The server will immediately send back a message indicating the current + * serving status. It will then subsequently send a new message whenever + * the service's serving status changes. + * + * If the requested service is unknown when the call is received, the + * server will send a message setting the serving status to + * SERVICE_UNKNOWN but will *not* terminate the call. If at some + * future point, the serving status of the service becomes known, the + * server will send a new message with the service's serving status. + * + * If the call terminates with status UNIMPLEMENTED, then clients + * should assume this method is not supported and should not retry the + * call. If the call terminates with any other status (including OK), + * clients should retry the call with appropriate exponential backoff. + */ + watch(argument: _grpc_health_v1_HealthCheckRequest, metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientReadableStream<_grpc_health_v1_HealthCheckResponse__Output>; + watch(argument: _grpc_health_v1_HealthCheckRequest, options?: grpc.CallOptions): grpc.ClientReadableStream<_grpc_health_v1_HealthCheckResponse__Output>; + +} + +/** + * Health is gRPC's mechanism for checking whether a server is able to handle + * RPCs. Its semantics are documented in + * https://github.com/grpc/grpc/blob/master/doc/health-checking.md. + */ +export interface HealthHandlers extends grpc.UntypedServiceImplementation { + /** + * Check gets the health of the specified service. 
If the requested service + * is unknown, the call will fail with status NOT_FOUND. If the caller does + * not specify a service name, the server should respond with its overall + * health status. + * + * Clients should set a deadline when calling Check, and can declare the + * server unhealthy if they do not receive a timely response. + * + * Check implementations should be idempotent and side effect free. + */ + Check: grpc.handleUnaryCall<_grpc_health_v1_HealthCheckRequest__Output, _grpc_health_v1_HealthCheckResponse>; + + /** + * Performs a watch for the serving status of the requested service. + * The server will immediately send back a message indicating the current + * serving status. It will then subsequently send a new message whenever + * the service's serving status changes. + * + * If the requested service is unknown when the call is received, the + * server will send a message setting the serving status to + * SERVICE_UNKNOWN but will *not* terminate the call. If at some + * future point, the serving status of the service becomes known, the + * server will send a new message with the service's serving status. + * + * If the call terminates with status UNIMPLEMENTED, then clients + * should assume this method is not supported and should not retry the + * call. If the call terminates with any other status (including OK), + * clients should retry the call with appropriate exponential backoff. + */ + Watch: grpc.handleServerStreamingCall<_grpc_health_v1_HealthCheckRequest__Output, _grpc_health_v1_HealthCheckResponse>; + +} + +export interface HealthDefinition extends grpc.ServiceDefinition { + Check: MethodDefinition<_grpc_health_v1_HealthCheckRequest, _grpc_health_v1_HealthCheckResponse, _grpc_health_v1_HealthCheckRequest__Output, _grpc_health_v1_HealthCheckResponse__Output> + Watch: MethodDefinition<_grpc_health_v1_HealthCheckRequest, _grpc_health_v1_HealthCheckResponse, _grpc_health_v1_HealthCheckRequest__Output, _grpc_health_v1_HealthCheckResponse__Output> +} diff --git a/packages/grpc-health-check/test/generated/grpc/health/v1/HealthCheckRequest.ts b/packages/grpc-health-check/test/generated/grpc/health/v1/HealthCheckRequest.ts new file mode 100644 index 000000000..71ae9df4e --- /dev/null +++ b/packages/grpc-health-check/test/generated/grpc/health/v1/HealthCheckRequest.ts @@ -0,0 +1,10 @@ +// Original file: proto/health/v1/health.proto + + +export interface HealthCheckRequest { + 'service'?: (string); +} + +export interface HealthCheckRequest__Output { + 'service': (string); +} diff --git a/packages/grpc-health-check/test/generated/grpc/health/v1/HealthCheckResponse.ts b/packages/grpc-health-check/test/generated/grpc/health/v1/HealthCheckResponse.ts new file mode 100644 index 000000000..ee4f375ae --- /dev/null +++ b/packages/grpc-health-check/test/generated/grpc/health/v1/HealthCheckResponse.ts @@ -0,0 +1,37 @@ +// Original file: proto/health/v1/health.proto + + +// Original file: proto/health/v1/health.proto + +export const _grpc_health_v1_HealthCheckResponse_ServingStatus = { + UNKNOWN: 'UNKNOWN', + SERVING: 'SERVING', + NOT_SERVING: 'NOT_SERVING', + /** + * Used only by the Watch method. + */ + SERVICE_UNKNOWN: 'SERVICE_UNKNOWN', +} as const; + +export type _grpc_health_v1_HealthCheckResponse_ServingStatus = + | 'UNKNOWN' + | 0 + | 'SERVING' + | 1 + | 'NOT_SERVING' + | 2 + /** + * Used only by the Watch method. 
+ */ + | 'SERVICE_UNKNOWN' + | 3 + +export type _grpc_health_v1_HealthCheckResponse_ServingStatus__Output = typeof _grpc_health_v1_HealthCheckResponse_ServingStatus[keyof typeof _grpc_health_v1_HealthCheckResponse_ServingStatus] + +export interface HealthCheckResponse { + 'status'?: (_grpc_health_v1_HealthCheckResponse_ServingStatus); +} + +export interface HealthCheckResponse__Output { + 'status': (_grpc_health_v1_HealthCheckResponse_ServingStatus__Output); +} diff --git a/packages/grpc-health-check/test/generated/health.ts b/packages/grpc-health-check/test/generated/health.ts new file mode 100644 index 000000000..afb2ced5f --- /dev/null +++ b/packages/grpc-health-check/test/generated/health.ts @@ -0,0 +1,26 @@ +import type * as grpc from '@grpc/grpc-js'; +import type { MessageTypeDefinition } from '@grpc/proto-loader'; + +import type { HealthClient as _grpc_health_v1_HealthClient, HealthDefinition as _grpc_health_v1_HealthDefinition } from './grpc/health/v1/Health'; + +type SubtypeConstructor<Constructor extends new (...args: any) => any, Subtype> = { + new(...args: ConstructorParameters<Constructor>): Subtype; +}; + +export interface ProtoGrpcType { + grpc: { + health: { + v1: { + /** + * Health is gRPC's mechanism for checking whether a server is able to handle + * RPCs. Its semantics are documented in + * https://github.com/grpc/grpc/blob/master/doc/health-checking.md. + */ + Health: SubtypeConstructor<typeof grpc.Client, _grpc_health_v1_HealthClient> & { service: _grpc_health_v1_HealthDefinition } + HealthCheckRequest: MessageTypeDefinition + HealthCheckResponse: MessageTypeDefinition + } + } + } +} + diff --git a/packages/grpc-health-check/test/health_test.js b/packages/grpc-health-check/test/health_test.js deleted file mode 100644 index a31d3b371..000000000 --- a/packages/grpc-health-check/test/health_test.js +++ /dev/null @@ -1,103 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- * - */ - -'use strict'; - -var assert = require('assert'); - -var health = require('../health'); - -var health_messages = require('../v1/health_pb'); - -var ServingStatus = health_messages.HealthCheckResponse.ServingStatus; - -var grpc = require('grpc'); - -describe('Health Checking', function() { - var statusMap = { - '': ServingStatus.SERVING, - 'grpc.test.TestServiceNotServing': ServingStatus.NOT_SERVING, - 'grpc.test.TestServiceServing': ServingStatus.SERVING - }; - var healthServer; - var healthImpl; - var healthClient; - before(function() { - healthServer = new grpc.Server(); - healthImpl = new health.Implementation(statusMap); - healthServer.addService(health.service, healthImpl); - var port_num = healthServer.bind('0.0.0.0:0', - grpc.ServerCredentials.createInsecure()); - healthServer.start(); - healthClient = new health.Client('localhost:' + port_num, - grpc.credentials.createInsecure()); - }); - after(function() { - healthServer.forceShutdown(); - }); - it('should say an enabled service is SERVING', function(done) { - var request = new health_messages.HealthCheckRequest(); - request.setService(''); - healthClient.check(request, function(err, response) { - assert.ifError(err); - assert.strictEqual(response.getStatus(), ServingStatus.SERVING); - done(); - }); - }); - it('should say that a disabled service is NOT_SERVING', function(done) { - var request = new health_messages.HealthCheckRequest(); - request.setService('grpc.test.TestServiceNotServing'); - healthClient.check(request, function(err, response) { - assert.ifError(err); - assert.strictEqual(response.getStatus(), ServingStatus.NOT_SERVING); - done(); - }); - }); - it('should say that an enabled service is SERVING', function(done) { - var request = new health_messages.HealthCheckRequest(); - request.setService('grpc.test.TestServiceServing'); - healthClient.check(request, function(err, response) { - assert.ifError(err); - assert.strictEqual(response.getStatus(), ServingStatus.SERVING); - done(); - }); - }); - it('should get NOT_FOUND if the service is not registered', function(done) { - var request = new health_messages.HealthCheckRequest(); - request.setService('not_registered'); - healthClient.check(request, function(err, response) { - assert(err); - assert.strictEqual(err.code, grpc.status.NOT_FOUND); - done(); - }); - }); - it('should get a different response if the status changes', function(done) { - var request = new health_messages.HealthCheckRequest(); - request.setService('transient'); - healthClient.check(request, function(err, response) { - assert(err); - assert.strictEqual(err.code, grpc.status.NOT_FOUND); - healthImpl.setStatus('transient', ServingStatus.SERVING); - healthClient.check(request, function(err, response) { - assert.ifError(err); - assert.strictEqual(response.getStatus(), ServingStatus.SERVING); - done(); - }); - }); - }); -}); diff --git a/packages/grpc-health-check/test/test-health.ts b/packages/grpc-health-check/test/test-health.ts new file mode 100644 index 000000000..80d60a234 --- /dev/null +++ b/packages/grpc-health-check/test/test-health.ts @@ -0,0 +1,152 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import * as assert from 'assert'; +import * as grpc from '@grpc/grpc-js'; +import { HealthImplementation, ServingStatusMap, service as healthServiceDefinition } from '../src/health'; +import { HealthClient } from './generated/grpc/health/v1/Health'; +import { HealthCheckResponse__Output, _grpc_health_v1_HealthCheckResponse_ServingStatus__Output } from './generated/grpc/health/v1/HealthCheckResponse'; + +describe('Health checking', () => { + const statusMap: ServingStatusMap = { + '': 'SERVING', + 'grpc.test.TestServiceNotServing': 'NOT_SERVING', + 'grpc.test.TestServiceServing': 'SERVING' + }; + let healthServer: grpc.Server; + let healthClient: HealthClient; + let healthImpl: HealthImplementation; + beforeEach(done => { + healthServer = new grpc.Server(); + healthImpl = new HealthImplementation(statusMap); + healthImpl.addToServer(healthServer); + healthServer.bindAsync('localhost:0', grpc.ServerCredentials.createInsecure(), (error, port) => { + if (error) { + done(error); + return; + } + const HealthClientConstructor = grpc.makeClientConstructor(healthServiceDefinition, 'grpc.health.v1.HealthService'); + healthClient = new HealthClientConstructor(`localhost:${port}`, grpc.credentials.createInsecure()) as unknown as HealthClient; + healthServer.start(); + done(); + }); + }); + afterEach((done) => { + healthClient.close(); + healthServer.tryShutdown(done); + }); + describe('check', () => { + it('Should say that an enabled service is SERVING', done => { + healthClient.check({service: ''}, (error, value) => { + assert.ifError(error); + assert.strictEqual(value?.status, 'SERVING'); + done(); + }); + }); + it('Should say that a disabled service is NOT_SERVING', done => { + healthClient.check({service: 'grpc.test.TestServiceNotServing'}, (error, value) => { + assert.ifError(error); + assert.strictEqual(value?.status, 'NOT_SERVING'); + done(); + }); + }); + it('Should get NOT_FOUND if the service is not registered', done => { + healthClient.check({service: 'not_registered'}, (error, value) => { + assert(error); + assert.strictEqual(error.code, grpc.status.NOT_FOUND); + done(); + }); + }); + it('Should get a different response if the health status changes', done => { + healthClient.check({service: 'transient'}, (error, value) => { + assert(error); + assert.strictEqual(error.code, grpc.status.NOT_FOUND); + healthImpl.setStatus('transient', 'SERVING'); + healthClient.check({service: 'transient'}, (error, value) => { + assert.ifError(error); + assert.strictEqual(value?.status, 'SERVING'); + done(); + }); + }); + }); + }); + describe('watch', () => { + it('Should respond with the health status for an existing service', done => { + const call = healthClient.watch({service: ''}); + call.on('data', (response: HealthCheckResponse__Output) => { + assert.strictEqual(response.status, 'SERVING'); + call.cancel(); + }); + call.on('error', () => {}); + call.on('status', status => { + assert.strictEqual(status.code, grpc.status.CANCELLED); + done(); + }); + }); + it('Should send a new update when the status changes', done => { + const receivedStatusList: 
_grpc_health_v1_HealthCheckResponse_ServingStatus__Output[] = []; + const call = healthClient.watch({service: 'grpc.test.TestServiceServing'}); + call.on('data', (response: HealthCheckResponse__Output) => { + switch (receivedStatusList.length) { + case 0: + assert.strictEqual(response.status, 'SERVING'); + healthImpl.setStatus('grpc.test.TestServiceServing', 'NOT_SERVING'); + break; + case 1: + assert.strictEqual(response.status, 'NOT_SERVING'); + call.cancel(); + break; + default: + assert.fail(`Unexpected third status update ${response.status}`); + } + receivedStatusList.push(response.status); + }); + call.on('error', () => {}); + call.on('status', status => { + assert.deepStrictEqual(receivedStatusList, ['SERVING', 'NOT_SERVING']); + assert.strictEqual(status.code, grpc.status.CANCELLED); + done(); + }); + }); + it('Should update when a service that did not exist is added', done => { + const receivedStatusList: _grpc_health_v1_HealthCheckResponse_ServingStatus__Output[] = []; + const call = healthClient.watch({service: 'transient'}); + call.on('data', (response: HealthCheckResponse__Output) => { + switch (receivedStatusList.length) { + case 0: + assert.strictEqual(response.status, 'SERVICE_UNKNOWN'); + healthImpl.setStatus('transient', 'SERVING'); + break; + case 1: + assert.strictEqual(response.status, 'SERVING'); + call.cancel(); + break; + default: + assert.fail(`Unexpected third status update ${response.status}`); + } + receivedStatusList.push(response.status); + }); + call.on('error', () => {}); + call.on('status', status => { + assert.deepStrictEqual(receivedStatusList, ['SERVICE_UNKNOWN', 'SERVING']); + assert.strictEqual(status.code, grpc.status.CANCELLED); + done(); + }); + }) + }); +}); diff --git a/packages/grpc-health-check/tsconfig.json b/packages/grpc-health-check/tsconfig.json new file mode 100644 index 000000000..763ceda98 --- /dev/null +++ b/packages/grpc-health-check/tsconfig.json @@ -0,0 +1,29 @@ +{ + "compilerOptions": { + "allowUnreachableCode": false, + "allowUnusedLabels": false, + "declaration": true, + "forceConsistentCasingInFileNames": true, + "noEmitOnError": true, + "noFallthroughCasesInSwitch": true, + "noImplicitReturns": true, + "pretty": true, + "sourceMap": true, + "strict": true, + "lib": ["es2017"], + "outDir": "build", + "target": "es2017", + "module": "commonjs", + "resolveJsonModule": true, + "incremental": true, + "types": ["mocha"], + "noUnusedLocals": true + }, + "include": [ + "src/**/*.ts", + "test/**/*.ts" + ], + "exclude": [ + "node_modules" + ] +} diff --git a/packages/grpc-js-xds/README.md b/packages/grpc-js-xds/README.md index bcea70456..5b7a5400a 100644 --- a/packages/grpc-js-xds/README.md +++ b/packages/grpc-js-xds/README.md @@ -1,6 +1,6 @@ # @grpc/grpc-js xDS plugin -This package provides support for the `xds://` URL scheme to the `@grpc/grpc-js` library. The latest version of this package is compatible with `@grpc/grpc-js` version 1.2.x. +This package provides support for the `xds://` URL scheme to the `@grpc/grpc-js` library. The latest version of this package is compatible with `@grpc/grpc-js` version 1.10.x. 
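For context on how this plugin is wired in, the following is a rough sketch (not taken from this README) of the usual setup; the proto path and `Greeter` service are illustrative, and the pattern mirrors the updated example client earlier in this change:

```typescript
// Sketch: enable the xds:// scheme before creating any channels.
import * as grpc from '@grpc/grpc-js';
import * as grpcXds from '@grpc/grpc-js-xds';
import * as protoLoader from '@grpc/proto-loader';

// register() installs the xds resolver and load balancing policies into @grpc/grpc-js.
grpcXds.register();

// Illustrative service; any client generated or loaded with @grpc/grpc-js works the same way.
const packageDefinition = protoLoader.loadSync(__dirname + '/protos/helloworld.proto');
const helloworld = grpc.loadPackageDefinition(packageDefinition).helloworld as any;

// An xds: target is resolved through the xDS control plane described in the bootstrap configuration.
const client = new helloworld.Greeter('xds:///example.com:123', grpc.credentials.createInsecure());
```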
## Installation @@ -22,4 +22,16 @@ const client = new MyServiceClient('xds:///example.com:123'); ## Supported Features - [xDS-Based Global Load Balancing](https://github.com/grpc/proposal/blob/master/A27-xds-global-load-balancing.md) - - [xDS traffic splitting and routing](https://github.com/grpc/proposal/blob/master/A28-xds-traffic-splitting-and-routing.md) \ No newline at end of file + - [xDS traffic splitting and routing](https://github.com/grpc/proposal/blob/master/A28-xds-traffic-splitting-and-routing.md) + - [xDS v3 API](https://github.com/grpc/proposal/blob/master/A30-xds-v3.md) + - [xDS Timeouts](https://github.com/grpc/proposal/blob/master/A31-xds-timeout-support-and-config-selector.md) + - [xDS Circuit Breaking](https://github.com/grpc/proposal/blob/master/A32-xds-circuit-breaking.md) + - [xDS Client-Side Fault Injection](https://github.com/grpc/proposal/blob/master/A33-Fault-Injection.md) + - [Client Status Discovery Service](https://github.com/grpc/proposal/blob/master/A40-csds-support.md) + - [Outlier Detection](https://github.com/grpc/proposal/blob/master/A50-xds-outlier-detection.md) + - [xDS Retry Support](https://github.com/grpc/proposal/blob/master/A44-xds-retry.md) + - [xDS Aggregate and Logical DNS Clusters](https://github.com/grpc/proposal/blob/master/A37-xds-aggregate-and-logical-dns-clusters.md) + - [xDS Federation](https://github.com/grpc/proposal/blob/master/A47-xds-federation.md) (Currently experimental, enabled by environment variable `GRPC_EXPERIMENTAL_XDS_FEDERATION`) + - [xDS Custom Load Balancer Configuration](https://github.com/grpc/proposal/blob/master/A52-xds-custom-lb-policies.md) (Custom load balancer registration not currently supported) + - [xDS Ring Hash LB Policy](https://github.com/grpc/proposal/blob/master/A42-xds-ring-hash-lb-policy.md) + - [`pick_first` via xDS](https://github.com/grpc/proposal/blob/master/A62-pick-first.md#pick_first-via-xds-1) (Currently experimental, enabled by environment variable `GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG`) diff --git a/packages/grpc-js-xds/deps/envoy-api b/packages/grpc-js-xds/deps/envoy-api index 50cef8fca..e53e7bbd0 160000 --- a/packages/grpc-js-xds/deps/envoy-api +++ b/packages/grpc-js-xds/deps/envoy-api @@ -1 +1 @@ -Subproject commit 50cef8fcab37ba59a61068934d08a3f4c28a681f +Subproject commit e53e7bbd012f81965f2e79848ad9a58ceb67201f diff --git a/packages/grpc-js-xds/deps/udpa b/packages/grpc-js-xds/deps/udpa deleted file mode 160000 index 3b31d022a..000000000 --- a/packages/grpc-js-xds/deps/udpa +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 3b31d022a144b334eb2224838e4d6952ab5253aa diff --git a/packages/grpc-js-xds/deps/xds b/packages/grpc-js-xds/deps/xds new file mode 160000 index 000000000..cb28da345 --- /dev/null +++ b/packages/grpc-js-xds/deps/xds @@ -0,0 +1 @@ +Subproject commit cb28da3451f158a947dfc45090fe92b07b243bc1 diff --git a/packages/grpc-js-xds/gulpfile.ts b/packages/grpc-js-xds/gulpfile.ts index 4ee6ac2c5..47ca71324 100644 --- a/packages/grpc-js-xds/gulpfile.ts +++ b/packages/grpc-js-xds/gulpfile.ts @@ -61,6 +61,11 @@ const cleanAll = gulp.parallel(clean); const compile = checkTask(() => execNpmCommand('compile')); const runTests = checkTask(() => { + process.env.GRPC_EXPERIMENTAL_XDS_FEDERATION = 'true'; + process.env.GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG = 'true'; + if (Number(process.versions.node.split('.')[0]) <= 14) { + process.env.GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH = 'false'; + } return gulp.src(`${outDir}/test/**/*.js`) .pipe(mocha({reporter: 'mocha-jenkins-reporter', require: 
['ts-node/register']})); diff --git a/packages/grpc-js-xds/interop/Dockerfile b/packages/grpc-js-xds/interop/Dockerfile new file mode 100644 index 000000000..6239b5f22 --- /dev/null +++ b/packages/grpc-js-xds/interop/Dockerfile @@ -0,0 +1,38 @@ +# Copyright 2022 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Dockerfile for building the xDS interop client. To build the image, run the +# following command from grpc-node directory: +# docker build -t -f packages/grpc-js-xds/interop/Dockerfile . + +FROM node:18-slim as build + +# Make a grpc-node directory and copy the repo into it. +WORKDIR /node/src/grpc-node +COPY . . + +WORKDIR /node/src/grpc-node/packages/grpc-js +RUN npm install +WORKDIR /node/src/grpc-node/packages/grpc-js-xds +RUN npm install + +FROM gcr.io/distroless/nodejs18-debian11:latest +WORKDIR /node/src/grpc-node +COPY --from=build /node/src/grpc-node/packages/grpc-js ./packages/grpc-js/ +COPY --from=build /node/src/grpc-node/packages/grpc-js-xds ./packages/grpc-js-xds/ + +ENV GRPC_VERBOSITY="DEBUG" +ENV GRPC_TRACE=xds_client,xds_resolver,xds_cluster_manager,cds_balancer,xds_cluster_resolver,xds_cluster_impl,priority,weighted_target,round_robin,resolving_load_balancer,subchannel,keepalive,dns_resolver,fault_injection,http_filter,csds,outlier_detection,server,server_call,ring_hash + +ENTRYPOINT [ "/nodejs/bin/node", "/node/src/grpc-node/packages/grpc-js-xds/build/interop/xds-interop-client" ] diff --git a/packages/grpc-js-xds/interop/generated/grpc/testing/ClientConfigureRequest.ts b/packages/grpc-js-xds/interop/generated/grpc/testing/ClientConfigureRequest.ts index 7f07e8966..4128fdda4 100644 --- a/packages/grpc-js-xds/interop/generated/grpc/testing/ClientConfigureRequest.ts +++ b/packages/grpc-js-xds/interop/generated/grpc/testing/ClientConfigureRequest.ts @@ -5,7 +5,7 @@ * Metadata to be attached for the given type of RPCs. */ export interface _grpc_testing_ClientConfigureRequest_Metadata { - 'type'?: (_grpc_testing_ClientConfigureRequest_RpcType | keyof typeof _grpc_testing_ClientConfigureRequest_RpcType); + 'type'?: (_grpc_testing_ClientConfigureRequest_RpcType); 'key'?: (string); 'value'?: (string); } @@ -14,7 +14,7 @@ export interface _grpc_testing_ClientConfigureRequest_Metadata { * Metadata to be attached for the given type of RPCs. */ export interface _grpc_testing_ClientConfigureRequest_Metadata__Output { - 'type': (keyof typeof _grpc_testing_ClientConfigureRequest_RpcType); + 'type': (_grpc_testing_ClientConfigureRequest_RpcType__Output); 'key': (string); 'value': (string); } @@ -24,10 +24,24 @@ export interface _grpc_testing_ClientConfigureRequest_Metadata__Output { /** * Type of RPCs to send. */ -export enum _grpc_testing_ClientConfigureRequest_RpcType { - EMPTY_CALL = 0, - UNARY_CALL = 1, -} +export const _grpc_testing_ClientConfigureRequest_RpcType = { + EMPTY_CALL: 'EMPTY_CALL', + UNARY_CALL: 'UNARY_CALL', +} as const; + +/** + * Type of RPCs to send. 
+ */ +export type _grpc_testing_ClientConfigureRequest_RpcType = + | 'EMPTY_CALL' + | 0 + | 'UNARY_CALL' + | 1 + +/** + * Type of RPCs to send. + */ +export type _grpc_testing_ClientConfigureRequest_RpcType__Output = typeof _grpc_testing_ClientConfigureRequest_RpcType[keyof typeof _grpc_testing_ClientConfigureRequest_RpcType] /** * Configurations for a test client. @@ -36,7 +50,7 @@ export interface ClientConfigureRequest { /** * The types of RPCs the client sends. */ - 'types'?: (_grpc_testing_ClientConfigureRequest_RpcType | keyof typeof _grpc_testing_ClientConfigureRequest_RpcType)[]; + 'types'?: (_grpc_testing_ClientConfigureRequest_RpcType)[]; /** * The collection of custom metadata to be attached to RPCs sent by the client. */ @@ -55,7 +69,7 @@ export interface ClientConfigureRequest__Output { /** * The types of RPCs the client sends. */ - 'types': (keyof typeof _grpc_testing_ClientConfigureRequest_RpcType)[]; + 'types': (_grpc_testing_ClientConfigureRequest_RpcType__Output)[]; /** * The collection of custom metadata to be attached to RPCs sent by the client. */ diff --git a/packages/grpc-js-xds/interop/generated/grpc/testing/GrpclbRouteType.ts b/packages/grpc-js-xds/interop/generated/grpc/testing/GrpclbRouteType.ts index 8ab0146b7..667442b41 100644 --- a/packages/grpc-js-xds/interop/generated/grpc/testing/GrpclbRouteType.ts +++ b/packages/grpc-js-xds/interop/generated/grpc/testing/GrpclbRouteType.ts @@ -8,17 +8,52 @@ * the address of this server from the gRPCLB server BalanceLoad RPC). Exactly * how this detection is done is context and server dependent. */ -export enum GrpclbRouteType { +export const GrpclbRouteType = { /** * Server didn't detect the route that a client took to reach it. */ - GRPCLB_ROUTE_TYPE_UNKNOWN = 0, + GRPCLB_ROUTE_TYPE_UNKNOWN: 'GRPCLB_ROUTE_TYPE_UNKNOWN', /** * Indicates that a client reached a server via gRPCLB fallback. */ - GRPCLB_ROUTE_TYPE_FALLBACK = 1, + GRPCLB_ROUTE_TYPE_FALLBACK: 'GRPCLB_ROUTE_TYPE_FALLBACK', /** * Indicates that a client reached a server as a gRPCLB-given backend. */ - GRPCLB_ROUTE_TYPE_BACKEND = 2, -} + GRPCLB_ROUTE_TYPE_BACKEND: 'GRPCLB_ROUTE_TYPE_BACKEND', +} as const; + +/** + * The type of route that a client took to reach a server w.r.t. gRPCLB. + * The server must fill in "fallback" if it detects that the RPC reached + * the server via the "gRPCLB fallback" path, and "backend" if it detects + * that the RPC reached the server via "gRPCLB backend" path (i.e. if it got + * the address of this server from the gRPCLB server BalanceLoad RPC). Exactly + * how this detection is done is context and server dependent. + */ +export type GrpclbRouteType = + /** + * Server didn't detect the route that a client took to reach it. + */ + | 'GRPCLB_ROUTE_TYPE_UNKNOWN' + | 0 + /** + * Indicates that a client reached a server via gRPCLB fallback. + */ + | 'GRPCLB_ROUTE_TYPE_FALLBACK' + | 1 + /** + * Indicates that a client reached a server as a gRPCLB-given backend. + */ + | 'GRPCLB_ROUTE_TYPE_BACKEND' + | 2 + +/** + * The type of route that a client took to reach a server w.r.t. gRPCLB. + * The server must fill in "fallback" if it detects that the RPC reached + * the server via the "gRPCLB fallback" path, and "backend" if it detects + * that the RPC reached the server via "gRPCLB backend" path (i.e. if it got + * the address of this server from the gRPCLB server BalanceLoad RPC). Exactly + * how this detection is done is context and server dependent. 
+ */ +export type GrpclbRouteType__Output = typeof GrpclbRouteType[keyof typeof GrpclbRouteType] diff --git a/packages/grpc-js-xds/interop/generated/grpc/testing/LoadBalancerAccumulatedStatsResponse.ts b/packages/grpc-js-xds/interop/generated/grpc/testing/LoadBalancerAccumulatedStatsResponse.ts index 91157ac4e..000ef9ecf 100644 --- a/packages/grpc-js-xds/interop/generated/grpc/testing/LoadBalancerAccumulatedStatsResponse.ts +++ b/packages/grpc-js-xds/interop/generated/grpc/testing/LoadBalancerAccumulatedStatsResponse.ts @@ -32,16 +32,19 @@ export interface LoadBalancerAccumulatedStatsResponse { /** * The total number of RPCs have ever issued for each type. * Deprecated: use stats_per_method.rpcs_started instead. + * @deprecated */ 'num_rpcs_started_by_method'?: ({[key: string]: number}); /** * The total number of RPCs have ever completed successfully for each type. * Deprecated: use stats_per_method.result instead. + * @deprecated */ 'num_rpcs_succeeded_by_method'?: ({[key: string]: number}); /** * The total number of RPCs have ever failed for each type. * Deprecated: use stats_per_method.result instead. + * @deprecated */ 'num_rpcs_failed_by_method'?: ({[key: string]: number}); /** @@ -58,21 +61,24 @@ export interface LoadBalancerAccumulatedStatsResponse__Output { /** * The total number of RPCs have ever issued for each type. * Deprecated: use stats_per_method.rpcs_started instead. + * @deprecated */ 'num_rpcs_started_by_method': ({[key: string]: number}); /** * The total number of RPCs have ever completed successfully for each type. * Deprecated: use stats_per_method.result instead. + * @deprecated */ 'num_rpcs_succeeded_by_method': ({[key: string]: number}); /** * The total number of RPCs have ever failed for each type. * Deprecated: use stats_per_method.result instead. + * @deprecated */ 'num_rpcs_failed_by_method': ({[key: string]: number}); /** * Per-method RPC statistics. The key is the RpcType in string form; e.g. * 'EMPTY_CALL' or 'UNARY_CALL' */ - 'stats_per_method'?: ({[key: string]: _grpc_testing_LoadBalancerAccumulatedStatsResponse_MethodStats__Output}); + 'stats_per_method': ({[key: string]: _grpc_testing_LoadBalancerAccumulatedStatsResponse_MethodStats__Output}); } diff --git a/packages/grpc-js-xds/interop/generated/grpc/testing/LoadBalancerStatsResponse.ts b/packages/grpc-js-xds/interop/generated/grpc/testing/LoadBalancerStatsResponse.ts index 184a6e258..ab33612c3 100644 --- a/packages/grpc-js-xds/interop/generated/grpc/testing/LoadBalancerStatsResponse.ts +++ b/packages/grpc-js-xds/interop/generated/grpc/testing/LoadBalancerStatsResponse.ts @@ -36,5 +36,5 @@ export interface LoadBalancerStatsResponse__Output { * The number of RPCs that failed to record a remote peer. 
*/ 'num_failures': (number); - 'rpcs_by_method'?: ({[key: string]: _grpc_testing_LoadBalancerStatsResponse_RpcsByPeer__Output}); + 'rpcs_by_method': ({[key: string]: _grpc_testing_LoadBalancerStatsResponse_RpcsByPeer__Output}); } diff --git a/packages/grpc-js-xds/interop/generated/grpc/testing/LoadBalancerStatsService.ts b/packages/grpc-js-xds/interop/generated/grpc/testing/LoadBalancerStatsService.ts index 26cfee9d7..9d11d9418 100644 --- a/packages/grpc-js-xds/interop/generated/grpc/testing/LoadBalancerStatsService.ts +++ b/packages/grpc-js-xds/interop/generated/grpc/testing/LoadBalancerStatsService.ts @@ -1,6 +1,7 @@ // Original file: proto/grpc/testing/test.proto import type * as grpc from '@grpc/grpc-js' +import type { MethodDefinition } from '@grpc/proto-loader' import type { LoadBalancerAccumulatedStatsRequest as _grpc_testing_LoadBalancerAccumulatedStatsRequest, LoadBalancerAccumulatedStatsRequest__Output as _grpc_testing_LoadBalancerAccumulatedStatsRequest__Output } from '../../grpc/testing/LoadBalancerAccumulatedStatsRequest'; import type { LoadBalancerAccumulatedStatsResponse as _grpc_testing_LoadBalancerAccumulatedStatsResponse, LoadBalancerAccumulatedStatsResponse__Output as _grpc_testing_LoadBalancerAccumulatedStatsResponse__Output } from '../../grpc/testing/LoadBalancerAccumulatedStatsResponse'; import type { LoadBalancerStatsRequest as _grpc_testing_LoadBalancerStatsRequest, LoadBalancerStatsRequest__Output as _grpc_testing_LoadBalancerStatsRequest__Output } from '../../grpc/testing/LoadBalancerStatsRequest'; @@ -13,32 +14,32 @@ export interface LoadBalancerStatsServiceClient extends grpc.Client { /** * Gets the accumulated stats for RPCs sent by a test client. */ - GetClientAccumulatedStats(argument: _grpc_testing_LoadBalancerAccumulatedStatsRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_LoadBalancerAccumulatedStatsResponse__Output) => void): grpc.ClientUnaryCall; - GetClientAccumulatedStats(argument: _grpc_testing_LoadBalancerAccumulatedStatsRequest, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _grpc_testing_LoadBalancerAccumulatedStatsResponse__Output) => void): grpc.ClientUnaryCall; - GetClientAccumulatedStats(argument: _grpc_testing_LoadBalancerAccumulatedStatsRequest, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_LoadBalancerAccumulatedStatsResponse__Output) => void): grpc.ClientUnaryCall; - GetClientAccumulatedStats(argument: _grpc_testing_LoadBalancerAccumulatedStatsRequest, callback: (error?: grpc.ServiceError, result?: _grpc_testing_LoadBalancerAccumulatedStatsResponse__Output) => void): grpc.ClientUnaryCall; + GetClientAccumulatedStats(argument: _grpc_testing_LoadBalancerAccumulatedStatsRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_LoadBalancerAccumulatedStatsResponse__Output>): grpc.ClientUnaryCall; + GetClientAccumulatedStats(argument: _grpc_testing_LoadBalancerAccumulatedStatsRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_LoadBalancerAccumulatedStatsResponse__Output>): grpc.ClientUnaryCall; + GetClientAccumulatedStats(argument: _grpc_testing_LoadBalancerAccumulatedStatsRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_LoadBalancerAccumulatedStatsResponse__Output>): grpc.ClientUnaryCall; + GetClientAccumulatedStats(argument: _grpc_testing_LoadBalancerAccumulatedStatsRequest, callback: 
grpc.requestCallback<_grpc_testing_LoadBalancerAccumulatedStatsResponse__Output>): grpc.ClientUnaryCall; /** * Gets the accumulated stats for RPCs sent by a test client. */ - getClientAccumulatedStats(argument: _grpc_testing_LoadBalancerAccumulatedStatsRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_LoadBalancerAccumulatedStatsResponse__Output) => void): grpc.ClientUnaryCall; - getClientAccumulatedStats(argument: _grpc_testing_LoadBalancerAccumulatedStatsRequest, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _grpc_testing_LoadBalancerAccumulatedStatsResponse__Output) => void): grpc.ClientUnaryCall; - getClientAccumulatedStats(argument: _grpc_testing_LoadBalancerAccumulatedStatsRequest, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_LoadBalancerAccumulatedStatsResponse__Output) => void): grpc.ClientUnaryCall; - getClientAccumulatedStats(argument: _grpc_testing_LoadBalancerAccumulatedStatsRequest, callback: (error?: grpc.ServiceError, result?: _grpc_testing_LoadBalancerAccumulatedStatsResponse__Output) => void): grpc.ClientUnaryCall; + getClientAccumulatedStats(argument: _grpc_testing_LoadBalancerAccumulatedStatsRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_LoadBalancerAccumulatedStatsResponse__Output>): grpc.ClientUnaryCall; + getClientAccumulatedStats(argument: _grpc_testing_LoadBalancerAccumulatedStatsRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_LoadBalancerAccumulatedStatsResponse__Output>): grpc.ClientUnaryCall; + getClientAccumulatedStats(argument: _grpc_testing_LoadBalancerAccumulatedStatsRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_LoadBalancerAccumulatedStatsResponse__Output>): grpc.ClientUnaryCall; + getClientAccumulatedStats(argument: _grpc_testing_LoadBalancerAccumulatedStatsRequest, callback: grpc.requestCallback<_grpc_testing_LoadBalancerAccumulatedStatsResponse__Output>): grpc.ClientUnaryCall; /** * Gets the backend distribution for RPCs sent by a test client. 
*/ - GetClientStats(argument: _grpc_testing_LoadBalancerStatsRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_LoadBalancerStatsResponse__Output) => void): grpc.ClientUnaryCall; - GetClientStats(argument: _grpc_testing_LoadBalancerStatsRequest, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _grpc_testing_LoadBalancerStatsResponse__Output) => void): grpc.ClientUnaryCall; - GetClientStats(argument: _grpc_testing_LoadBalancerStatsRequest, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_LoadBalancerStatsResponse__Output) => void): grpc.ClientUnaryCall; - GetClientStats(argument: _grpc_testing_LoadBalancerStatsRequest, callback: (error?: grpc.ServiceError, result?: _grpc_testing_LoadBalancerStatsResponse__Output) => void): grpc.ClientUnaryCall; + GetClientStats(argument: _grpc_testing_LoadBalancerStatsRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_LoadBalancerStatsResponse__Output>): grpc.ClientUnaryCall; + GetClientStats(argument: _grpc_testing_LoadBalancerStatsRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_LoadBalancerStatsResponse__Output>): grpc.ClientUnaryCall; + GetClientStats(argument: _grpc_testing_LoadBalancerStatsRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_LoadBalancerStatsResponse__Output>): grpc.ClientUnaryCall; + GetClientStats(argument: _grpc_testing_LoadBalancerStatsRequest, callback: grpc.requestCallback<_grpc_testing_LoadBalancerStatsResponse__Output>): grpc.ClientUnaryCall; /** * Gets the backend distribution for RPCs sent by a test client. */ - getClientStats(argument: _grpc_testing_LoadBalancerStatsRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_LoadBalancerStatsResponse__Output) => void): grpc.ClientUnaryCall; - getClientStats(argument: _grpc_testing_LoadBalancerStatsRequest, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _grpc_testing_LoadBalancerStatsResponse__Output) => void): grpc.ClientUnaryCall; - getClientStats(argument: _grpc_testing_LoadBalancerStatsRequest, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_LoadBalancerStatsResponse__Output) => void): grpc.ClientUnaryCall; - getClientStats(argument: _grpc_testing_LoadBalancerStatsRequest, callback: (error?: grpc.ServiceError, result?: _grpc_testing_LoadBalancerStatsResponse__Output) => void): grpc.ClientUnaryCall; + getClientStats(argument: _grpc_testing_LoadBalancerStatsRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_LoadBalancerStatsResponse__Output>): grpc.ClientUnaryCall; + getClientStats(argument: _grpc_testing_LoadBalancerStatsRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_LoadBalancerStatsResponse__Output>): grpc.ClientUnaryCall; + getClientStats(argument: _grpc_testing_LoadBalancerStatsRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_LoadBalancerStatsResponse__Output>): grpc.ClientUnaryCall; + getClientStats(argument: _grpc_testing_LoadBalancerStatsRequest, callback: grpc.requestCallback<_grpc_testing_LoadBalancerStatsResponse__Output>): grpc.ClientUnaryCall; } @@ -57,3 +58,8 @@ export interface LoadBalancerStatsServiceHandlers extends grpc.UntypedServiceImp GetClientStats: 
grpc.handleUnaryCall<_grpc_testing_LoadBalancerStatsRequest__Output, _grpc_testing_LoadBalancerStatsResponse>; } + +export interface LoadBalancerStatsServiceDefinition extends grpc.ServiceDefinition { + GetClientAccumulatedStats: MethodDefinition<_grpc_testing_LoadBalancerAccumulatedStatsRequest, _grpc_testing_LoadBalancerAccumulatedStatsResponse, _grpc_testing_LoadBalancerAccumulatedStatsRequest__Output, _grpc_testing_LoadBalancerAccumulatedStatsResponse__Output> + GetClientStats: MethodDefinition<_grpc_testing_LoadBalancerStatsRequest, _grpc_testing_LoadBalancerStatsResponse, _grpc_testing_LoadBalancerStatsRequest__Output, _grpc_testing_LoadBalancerStatsResponse__Output> +} diff --git a/packages/grpc-js-xds/interop/generated/grpc/testing/Payload.ts b/packages/grpc-js-xds/interop/generated/grpc/testing/Payload.ts index 79102d2bf..17eb9e60a 100644 --- a/packages/grpc-js-xds/interop/generated/grpc/testing/Payload.ts +++ b/packages/grpc-js-xds/interop/generated/grpc/testing/Payload.ts @@ -1,6 +1,6 @@ // Original file: proto/grpc/testing/messages.proto -import type { PayloadType as _grpc_testing_PayloadType } from '../../grpc/testing/PayloadType'; +import type { PayloadType as _grpc_testing_PayloadType, PayloadType__Output as _grpc_testing_PayloadType__Output } from '../../grpc/testing/PayloadType'; /** * A block of data, to simply increase gRPC message size. @@ -9,7 +9,7 @@ export interface Payload { /** * The type of data in body. */ - 'type'?: (_grpc_testing_PayloadType | keyof typeof _grpc_testing_PayloadType); + 'type'?: (_grpc_testing_PayloadType); /** * Primary contents of payload. */ @@ -23,7 +23,7 @@ export interface Payload__Output { /** * The type of data in body. */ - 'type': (keyof typeof _grpc_testing_PayloadType); + 'type': (_grpc_testing_PayloadType__Output); /** * Primary contents of payload. */ diff --git a/packages/grpc-js-xds/interop/generated/grpc/testing/PayloadType.ts b/packages/grpc-js-xds/interop/generated/grpc/testing/PayloadType.ts index 3cf9d375a..64e526090 100644 --- a/packages/grpc-js-xds/interop/generated/grpc/testing/PayloadType.ts +++ b/packages/grpc-js-xds/interop/generated/grpc/testing/PayloadType.ts @@ -3,9 +3,24 @@ /** * The type of payload that should be returned. */ -export enum PayloadType { +export const PayloadType = { /** * Compressable text format. */ - COMPRESSABLE = 0, -} + COMPRESSABLE: 'COMPRESSABLE', +} as const; + +/** + * The type of payload that should be returned. + */ +export type PayloadType = + /** + * Compressable text format. + */ + | 'COMPRESSABLE' + | 0 + +/** + * The type of payload that should be returned. 
+ */ +export type PayloadType__Output = typeof PayloadType[keyof typeof PayloadType] diff --git a/packages/grpc-js-xds/interop/generated/grpc/testing/ReconnectService.ts b/packages/grpc-js-xds/interop/generated/grpc/testing/ReconnectService.ts index e489e2849..2e3f25680 100644 --- a/packages/grpc-js-xds/interop/generated/grpc/testing/ReconnectService.ts +++ b/packages/grpc-js-xds/interop/generated/grpc/testing/ReconnectService.ts @@ -1,6 +1,7 @@ // Original file: proto/grpc/testing/test.proto import type * as grpc from '@grpc/grpc-js' +import type { MethodDefinition } from '@grpc/proto-loader' import type { Empty as _grpc_testing_Empty, Empty__Output as _grpc_testing_Empty__Output } from '../../grpc/testing/Empty'; import type { ReconnectInfo as _grpc_testing_ReconnectInfo, ReconnectInfo__Output as _grpc_testing_ReconnectInfo__Output } from '../../grpc/testing/ReconnectInfo'; import type { ReconnectParams as _grpc_testing_ReconnectParams, ReconnectParams__Output as _grpc_testing_ReconnectParams__Output } from '../../grpc/testing/ReconnectParams'; @@ -9,23 +10,23 @@ import type { ReconnectParams as _grpc_testing_ReconnectParams, ReconnectParams_ * A service used to control reconnect server. */ export interface ReconnectServiceClient extends grpc.Client { - Start(argument: _grpc_testing_ReconnectParams, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - Start(argument: _grpc_testing_ReconnectParams, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - Start(argument: _grpc_testing_ReconnectParams, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - Start(argument: _grpc_testing_ReconnectParams, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - start(argument: _grpc_testing_ReconnectParams, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - start(argument: _grpc_testing_ReconnectParams, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - start(argument: _grpc_testing_ReconnectParams, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - start(argument: _grpc_testing_ReconnectParams, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; + Start(argument: _grpc_testing_ReconnectParams, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + Start(argument: _grpc_testing_ReconnectParams, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + Start(argument: _grpc_testing_ReconnectParams, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + Start(argument: _grpc_testing_ReconnectParams, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + start(argument: _grpc_testing_ReconnectParams, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): 
grpc.ClientUnaryCall; + start(argument: _grpc_testing_ReconnectParams, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + start(argument: _grpc_testing_ReconnectParams, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + start(argument: _grpc_testing_ReconnectParams, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; - Stop(argument: _grpc_testing_Empty, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_ReconnectInfo__Output) => void): grpc.ClientUnaryCall; - Stop(argument: _grpc_testing_Empty, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _grpc_testing_ReconnectInfo__Output) => void): grpc.ClientUnaryCall; - Stop(argument: _grpc_testing_Empty, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_ReconnectInfo__Output) => void): grpc.ClientUnaryCall; - Stop(argument: _grpc_testing_Empty, callback: (error?: grpc.ServiceError, result?: _grpc_testing_ReconnectInfo__Output) => void): grpc.ClientUnaryCall; - stop(argument: _grpc_testing_Empty, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_ReconnectInfo__Output) => void): grpc.ClientUnaryCall; - stop(argument: _grpc_testing_Empty, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _grpc_testing_ReconnectInfo__Output) => void): grpc.ClientUnaryCall; - stop(argument: _grpc_testing_Empty, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_ReconnectInfo__Output) => void): grpc.ClientUnaryCall; - stop(argument: _grpc_testing_Empty, callback: (error?: grpc.ServiceError, result?: _grpc_testing_ReconnectInfo__Output) => void): grpc.ClientUnaryCall; + Stop(argument: _grpc_testing_Empty, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_ReconnectInfo__Output>): grpc.ClientUnaryCall; + Stop(argument: _grpc_testing_Empty, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_ReconnectInfo__Output>): grpc.ClientUnaryCall; + Stop(argument: _grpc_testing_Empty, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_ReconnectInfo__Output>): grpc.ClientUnaryCall; + Stop(argument: _grpc_testing_Empty, callback: grpc.requestCallback<_grpc_testing_ReconnectInfo__Output>): grpc.ClientUnaryCall; + stop(argument: _grpc_testing_Empty, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_ReconnectInfo__Output>): grpc.ClientUnaryCall; + stop(argument: _grpc_testing_Empty, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_ReconnectInfo__Output>): grpc.ClientUnaryCall; + stop(argument: _grpc_testing_Empty, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_ReconnectInfo__Output>): grpc.ClientUnaryCall; + stop(argument: _grpc_testing_Empty, callback: grpc.requestCallback<_grpc_testing_ReconnectInfo__Output>): grpc.ClientUnaryCall; } @@ -38,3 +39,8 @@ export interface ReconnectServiceHandlers extends grpc.UntypedServiceImplementat Stop: grpc.handleUnaryCall<_grpc_testing_Empty__Output, _grpc_testing_ReconnectInfo>; } + +export interface ReconnectServiceDefinition extends grpc.ServiceDefinition { + Start: MethodDefinition<_grpc_testing_ReconnectParams, _grpc_testing_Empty, _grpc_testing_ReconnectParams__Output, 
_grpc_testing_Empty__Output> + Stop: MethodDefinition<_grpc_testing_Empty, _grpc_testing_ReconnectInfo, _grpc_testing_Empty__Output, _grpc_testing_ReconnectInfo__Output> +} diff --git a/packages/grpc-js-xds/interop/generated/grpc/testing/ResponseParameters.ts b/packages/grpc-js-xds/interop/generated/grpc/testing/ResponseParameters.ts index 04ca94ced..15f2f01f4 100644 --- a/packages/grpc-js-xds/interop/generated/grpc/testing/ResponseParameters.ts +++ b/packages/grpc-js-xds/interop/generated/grpc/testing/ResponseParameters.ts @@ -21,7 +21,7 @@ export interface ResponseParameters { * implement the full compression tests by introspecting the call to verify * the response's compression status. */ - 'compressed'?: (_grpc_testing_BoolValue); + 'compressed'?: (_grpc_testing_BoolValue | null); } /** @@ -43,5 +43,5 @@ export interface ResponseParameters__Output { * implement the full compression tests by introspecting the call to verify * the response's compression status. */ - 'compressed'?: (_grpc_testing_BoolValue__Output); + 'compressed': (_grpc_testing_BoolValue__Output | null); } diff --git a/packages/grpc-js-xds/interop/generated/grpc/testing/SimpleRequest.ts b/packages/grpc-js-xds/interop/generated/grpc/testing/SimpleRequest.ts index 056eb10b2..21843af69 100644 --- a/packages/grpc-js-xds/interop/generated/grpc/testing/SimpleRequest.ts +++ b/packages/grpc-js-xds/interop/generated/grpc/testing/SimpleRequest.ts @@ -1,6 +1,6 @@ // Original file: proto/grpc/testing/messages.proto -import type { PayloadType as _grpc_testing_PayloadType } from '../../grpc/testing/PayloadType'; +import type { PayloadType as _grpc_testing_PayloadType, PayloadType__Output as _grpc_testing_PayloadType__Output } from '../../grpc/testing/PayloadType'; import type { Payload as _grpc_testing_Payload, Payload__Output as _grpc_testing_Payload__Output } from '../../grpc/testing/Payload'; import type { BoolValue as _grpc_testing_BoolValue, BoolValue__Output as _grpc_testing_BoolValue__Output } from '../../grpc/testing/BoolValue'; import type { EchoStatus as _grpc_testing_EchoStatus, EchoStatus__Output as _grpc_testing_EchoStatus__Output } from '../../grpc/testing/EchoStatus'; @@ -13,7 +13,7 @@ export interface SimpleRequest { * Desired payload type in the response from the server. * If response_type is RANDOM, server randomly chooses one from other formats. */ - 'response_type'?: (_grpc_testing_PayloadType | keyof typeof _grpc_testing_PayloadType); + 'response_type'?: (_grpc_testing_PayloadType); /** * Desired payload size in the response from the server. */ @@ -21,7 +21,7 @@ export interface SimpleRequest { /** * Optional input payload sent along with the request. */ - 'payload'?: (_grpc_testing_Payload); + 'payload'?: (_grpc_testing_Payload | null); /** * Whether SimpleResponse should include username. */ @@ -36,15 +36,15 @@ export interface SimpleRequest { * implement the full compression tests by introspecting the call to verify * the response's compression status. */ - 'response_compressed'?: (_grpc_testing_BoolValue); + 'response_compressed'?: (_grpc_testing_BoolValue | null); /** * Whether server should return a given status */ - 'response_status'?: (_grpc_testing_EchoStatus); + 'response_status'?: (_grpc_testing_EchoStatus | null); /** * Whether the server should expect this request to be compressed. */ - 'expect_compressed'?: (_grpc_testing_BoolValue); + 'expect_compressed'?: (_grpc_testing_BoolValue | null); /** * Whether SimpleResponse should include server_id. 
*/ @@ -63,7 +63,7 @@ export interface SimpleRequest__Output { * Desired payload type in the response from the server. * If response_type is RANDOM, server randomly chooses one from other formats. */ - 'response_type': (keyof typeof _grpc_testing_PayloadType); + 'response_type': (_grpc_testing_PayloadType__Output); /** * Desired payload size in the response from the server. */ @@ -71,7 +71,7 @@ export interface SimpleRequest__Output { /** * Optional input payload sent along with the request. */ - 'payload'?: (_grpc_testing_Payload__Output); + 'payload': (_grpc_testing_Payload__Output | null); /** * Whether SimpleResponse should include username. */ @@ -86,15 +86,15 @@ export interface SimpleRequest__Output { * implement the full compression tests by introspecting the call to verify * the response's compression status. */ - 'response_compressed'?: (_grpc_testing_BoolValue__Output); + 'response_compressed': (_grpc_testing_BoolValue__Output | null); /** * Whether server should return a given status */ - 'response_status'?: (_grpc_testing_EchoStatus__Output); + 'response_status': (_grpc_testing_EchoStatus__Output | null); /** * Whether the server should expect this request to be compressed. */ - 'expect_compressed'?: (_grpc_testing_BoolValue__Output); + 'expect_compressed': (_grpc_testing_BoolValue__Output | null); /** * Whether SimpleResponse should include server_id. */ diff --git a/packages/grpc-js-xds/interop/generated/grpc/testing/SimpleResponse.ts b/packages/grpc-js-xds/interop/generated/grpc/testing/SimpleResponse.ts index 661f336ce..b737c31fa 100644 --- a/packages/grpc-js-xds/interop/generated/grpc/testing/SimpleResponse.ts +++ b/packages/grpc-js-xds/interop/generated/grpc/testing/SimpleResponse.ts @@ -1,7 +1,7 @@ // Original file: proto/grpc/testing/messages.proto import type { Payload as _grpc_testing_Payload, Payload__Output as _grpc_testing_Payload__Output } from '../../grpc/testing/Payload'; -import type { GrpclbRouteType as _grpc_testing_GrpclbRouteType } from '../../grpc/testing/GrpclbRouteType'; +import type { GrpclbRouteType as _grpc_testing_GrpclbRouteType, GrpclbRouteType__Output as _grpc_testing_GrpclbRouteType__Output } from '../../grpc/testing/GrpclbRouteType'; /** * Unary response, as configured by the request. @@ -10,7 +10,7 @@ export interface SimpleResponse { /** * Payload to increase message size. */ - 'payload'?: (_grpc_testing_Payload); + 'payload'?: (_grpc_testing_Payload | null); /** * The user the request came from, for verifying authentication was * successful when the client expected it. @@ -28,7 +28,7 @@ export interface SimpleResponse { /** * gRPCLB Path. */ - 'grpclb_route_type'?: (_grpc_testing_GrpclbRouteType | keyof typeof _grpc_testing_GrpclbRouteType); + 'grpclb_route_type'?: (_grpc_testing_GrpclbRouteType); /** * Server hostname. */ @@ -42,7 +42,7 @@ export interface SimpleResponse__Output { /** * Payload to increase message size. */ - 'payload'?: (_grpc_testing_Payload__Output); + 'payload': (_grpc_testing_Payload__Output | null); /** * The user the request came from, for verifying authentication was * successful when the client expected it. @@ -60,7 +60,7 @@ export interface SimpleResponse__Output { /** * gRPCLB Path. */ - 'grpclb_route_type': (keyof typeof _grpc_testing_GrpclbRouteType); + 'grpclb_route_type': (_grpc_testing_GrpclbRouteType__Output); /** * Server hostname. 
*/ diff --git a/packages/grpc-js-xds/interop/generated/grpc/testing/StreamingInputCallRequest.ts b/packages/grpc-js-xds/interop/generated/grpc/testing/StreamingInputCallRequest.ts index 56ad2b217..f45568849 100644 --- a/packages/grpc-js-xds/interop/generated/grpc/testing/StreamingInputCallRequest.ts +++ b/packages/grpc-js-xds/interop/generated/grpc/testing/StreamingInputCallRequest.ts @@ -10,14 +10,14 @@ export interface StreamingInputCallRequest { /** * Optional input payload sent along with the request. */ - 'payload'?: (_grpc_testing_Payload); + 'payload'?: (_grpc_testing_Payload | null); /** * Whether the server should expect this request to be compressed. This field * is "nullable" in order to interoperate seamlessly with servers not able to * implement the full compression tests by introspecting the call to verify * the request's compression status. */ - 'expect_compressed'?: (_grpc_testing_BoolValue); + 'expect_compressed'?: (_grpc_testing_BoolValue | null); } /** @@ -27,12 +27,12 @@ export interface StreamingInputCallRequest__Output { /** * Optional input payload sent along with the request. */ - 'payload'?: (_grpc_testing_Payload__Output); + 'payload': (_grpc_testing_Payload__Output | null); /** * Whether the server should expect this request to be compressed. This field * is "nullable" in order to interoperate seamlessly with servers not able to * implement the full compression tests by introspecting the call to verify * the request's compression status. */ - 'expect_compressed'?: (_grpc_testing_BoolValue__Output); + 'expect_compressed': (_grpc_testing_BoolValue__Output | null); } diff --git a/packages/grpc-js-xds/interop/generated/grpc/testing/StreamingOutputCallRequest.ts b/packages/grpc-js-xds/interop/generated/grpc/testing/StreamingOutputCallRequest.ts index 52922062d..0d812b74f 100644 --- a/packages/grpc-js-xds/interop/generated/grpc/testing/StreamingOutputCallRequest.ts +++ b/packages/grpc-js-xds/interop/generated/grpc/testing/StreamingOutputCallRequest.ts @@ -1,6 +1,6 @@ // Original file: proto/grpc/testing/messages.proto -import type { PayloadType as _grpc_testing_PayloadType } from '../../grpc/testing/PayloadType'; +import type { PayloadType as _grpc_testing_PayloadType, PayloadType__Output as _grpc_testing_PayloadType__Output } from '../../grpc/testing/PayloadType'; import type { ResponseParameters as _grpc_testing_ResponseParameters, ResponseParameters__Output as _grpc_testing_ResponseParameters__Output } from '../../grpc/testing/ResponseParameters'; import type { Payload as _grpc_testing_Payload, Payload__Output as _grpc_testing_Payload__Output } from '../../grpc/testing/Payload'; import type { EchoStatus as _grpc_testing_EchoStatus, EchoStatus__Output as _grpc_testing_EchoStatus__Output } from '../../grpc/testing/EchoStatus'; @@ -15,7 +15,7 @@ export interface StreamingOutputCallRequest { * might be of different types. This is to simulate a mixed type of payload * stream. */ - 'response_type'?: (_grpc_testing_PayloadType | keyof typeof _grpc_testing_PayloadType); + 'response_type'?: (_grpc_testing_PayloadType); /** * Configuration for each expected response message. */ @@ -23,11 +23,11 @@ export interface StreamingOutputCallRequest { /** * Optional input payload sent along with the request. 
*/ - 'payload'?: (_grpc_testing_Payload); + 'payload'?: (_grpc_testing_Payload | null); /** * Whether server should return a given status */ - 'response_status'?: (_grpc_testing_EchoStatus); + 'response_status'?: (_grpc_testing_EchoStatus | null); } /** @@ -40,7 +40,7 @@ export interface StreamingOutputCallRequest__Output { * might be of different types. This is to simulate a mixed type of payload * stream. */ - 'response_type': (keyof typeof _grpc_testing_PayloadType); + 'response_type': (_grpc_testing_PayloadType__Output); /** * Configuration for each expected response message. */ @@ -48,9 +48,9 @@ export interface StreamingOutputCallRequest__Output { /** * Optional input payload sent along with the request. */ - 'payload'?: (_grpc_testing_Payload__Output); + 'payload': (_grpc_testing_Payload__Output | null); /** * Whether server should return a given status */ - 'response_status'?: (_grpc_testing_EchoStatus__Output); + 'response_status': (_grpc_testing_EchoStatus__Output | null); } diff --git a/packages/grpc-js-xds/interop/generated/grpc/testing/StreamingOutputCallResponse.ts b/packages/grpc-js-xds/interop/generated/grpc/testing/StreamingOutputCallResponse.ts index 19ab306dd..e2eb435cd 100644 --- a/packages/grpc-js-xds/interop/generated/grpc/testing/StreamingOutputCallResponse.ts +++ b/packages/grpc-js-xds/interop/generated/grpc/testing/StreamingOutputCallResponse.ts @@ -9,7 +9,7 @@ export interface StreamingOutputCallResponse { /** * Payload to increase response size. */ - 'payload'?: (_grpc_testing_Payload); + 'payload'?: (_grpc_testing_Payload | null); } /** @@ -19,5 +19,5 @@ export interface StreamingOutputCallResponse__Output { /** * Payload to increase response size. */ - 'payload'?: (_grpc_testing_Payload__Output); + 'payload': (_grpc_testing_Payload__Output | null); } diff --git a/packages/grpc-js-xds/interop/generated/grpc/testing/TestService.ts b/packages/grpc-js-xds/interop/generated/grpc/testing/TestService.ts index dbb606c83..139d3c0ef 100644 --- a/packages/grpc-js-xds/interop/generated/grpc/testing/TestService.ts +++ b/packages/grpc-js-xds/interop/generated/grpc/testing/TestService.ts @@ -1,6 +1,7 @@ // Original file: proto/grpc/testing/test.proto import type * as grpc from '@grpc/grpc-js' +import type { MethodDefinition } from '@grpc/proto-loader' import type { Empty as _grpc_testing_Empty, Empty__Output as _grpc_testing_Empty__Output } from '../../grpc/testing/Empty'; import type { SimpleRequest as _grpc_testing_SimpleRequest, SimpleRequest__Output as _grpc_testing_SimpleRequest__Output } from '../../grpc/testing/SimpleRequest'; import type { SimpleResponse as _grpc_testing_SimpleResponse, SimpleResponse__Output as _grpc_testing_SimpleResponse__Output } from '../../grpc/testing/SimpleResponse'; @@ -19,34 +20,34 @@ export interface TestServiceClient extends grpc.Client { * headers set such that a caching HTTP proxy (such as GFE) can * satisfy subsequent requests. 
*/ - CacheableUnaryCall(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_SimpleResponse__Output) => void): grpc.ClientUnaryCall; - CacheableUnaryCall(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _grpc_testing_SimpleResponse__Output) => void): grpc.ClientUnaryCall; - CacheableUnaryCall(argument: _grpc_testing_SimpleRequest, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_SimpleResponse__Output) => void): grpc.ClientUnaryCall; - CacheableUnaryCall(argument: _grpc_testing_SimpleRequest, callback: (error?: grpc.ServiceError, result?: _grpc_testing_SimpleResponse__Output) => void): grpc.ClientUnaryCall; + CacheableUnaryCall(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + CacheableUnaryCall(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + CacheableUnaryCall(argument: _grpc_testing_SimpleRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + CacheableUnaryCall(argument: _grpc_testing_SimpleRequest, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; /** * One request followed by one response. Response has cache control * headers set such that a caching HTTP proxy (such as GFE) can * satisfy subsequent requests. */ - cacheableUnaryCall(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_SimpleResponse__Output) => void): grpc.ClientUnaryCall; - cacheableUnaryCall(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _grpc_testing_SimpleResponse__Output) => void): grpc.ClientUnaryCall; - cacheableUnaryCall(argument: _grpc_testing_SimpleRequest, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_SimpleResponse__Output) => void): grpc.ClientUnaryCall; - cacheableUnaryCall(argument: _grpc_testing_SimpleRequest, callback: (error?: grpc.ServiceError, result?: _grpc_testing_SimpleResponse__Output) => void): grpc.ClientUnaryCall; + cacheableUnaryCall(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + cacheableUnaryCall(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + cacheableUnaryCall(argument: _grpc_testing_SimpleRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + cacheableUnaryCall(argument: _grpc_testing_SimpleRequest, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; /** * One empty request followed by one empty response. 
*/ - EmptyCall(argument: _grpc_testing_Empty, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - EmptyCall(argument: _grpc_testing_Empty, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - EmptyCall(argument: _grpc_testing_Empty, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - EmptyCall(argument: _grpc_testing_Empty, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; + EmptyCall(argument: _grpc_testing_Empty, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + EmptyCall(argument: _grpc_testing_Empty, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + EmptyCall(argument: _grpc_testing_Empty, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + EmptyCall(argument: _grpc_testing_Empty, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; /** * One empty request followed by one empty response. */ - emptyCall(argument: _grpc_testing_Empty, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - emptyCall(argument: _grpc_testing_Empty, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - emptyCall(argument: _grpc_testing_Empty, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - emptyCall(argument: _grpc_testing_Empty, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; + emptyCall(argument: _grpc_testing_Empty, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + emptyCall(argument: _grpc_testing_Empty, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + emptyCall(argument: _grpc_testing_Empty, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + emptyCall(argument: _grpc_testing_Empty, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; /** * A sequence of requests with each request served by the server immediately. @@ -84,18 +85,18 @@ export interface TestServiceClient extends grpc.Client { * A sequence of requests followed by one response (streamed upload). * The server returns the aggregated size of client payload as the result. 
*/ - StreamingInputCall(metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_StreamingInputCallResponse__Output) => void): grpc.ClientWritableStream<_grpc_testing_StreamingInputCallRequest>; - StreamingInputCall(metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _grpc_testing_StreamingInputCallResponse__Output) => void): grpc.ClientWritableStream<_grpc_testing_StreamingInputCallRequest>; - StreamingInputCall(options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_StreamingInputCallResponse__Output) => void): grpc.ClientWritableStream<_grpc_testing_StreamingInputCallRequest>; - StreamingInputCall(callback: (error?: grpc.ServiceError, result?: _grpc_testing_StreamingInputCallResponse__Output) => void): grpc.ClientWritableStream<_grpc_testing_StreamingInputCallRequest>; + StreamingInputCall(metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_StreamingInputCallResponse__Output>): grpc.ClientWritableStream<_grpc_testing_StreamingInputCallRequest>; + StreamingInputCall(metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_StreamingInputCallResponse__Output>): grpc.ClientWritableStream<_grpc_testing_StreamingInputCallRequest>; + StreamingInputCall(options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_StreamingInputCallResponse__Output>): grpc.ClientWritableStream<_grpc_testing_StreamingInputCallRequest>; + StreamingInputCall(callback: grpc.requestCallback<_grpc_testing_StreamingInputCallResponse__Output>): grpc.ClientWritableStream<_grpc_testing_StreamingInputCallRequest>; /** * A sequence of requests followed by one response (streamed upload). * The server returns the aggregated size of client payload as the result. 
*/ - streamingInputCall(metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_StreamingInputCallResponse__Output) => void): grpc.ClientWritableStream<_grpc_testing_StreamingInputCallRequest>; - streamingInputCall(metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _grpc_testing_StreamingInputCallResponse__Output) => void): grpc.ClientWritableStream<_grpc_testing_StreamingInputCallRequest>; - streamingInputCall(options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_StreamingInputCallResponse__Output) => void): grpc.ClientWritableStream<_grpc_testing_StreamingInputCallRequest>; - streamingInputCall(callback: (error?: grpc.ServiceError, result?: _grpc_testing_StreamingInputCallResponse__Output) => void): grpc.ClientWritableStream<_grpc_testing_StreamingInputCallRequest>; + streamingInputCall(metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_StreamingInputCallResponse__Output>): grpc.ClientWritableStream<_grpc_testing_StreamingInputCallRequest>; + streamingInputCall(metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_StreamingInputCallResponse__Output>): grpc.ClientWritableStream<_grpc_testing_StreamingInputCallRequest>; + streamingInputCall(options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_StreamingInputCallResponse__Output>): grpc.ClientWritableStream<_grpc_testing_StreamingInputCallRequest>; + streamingInputCall(callback: grpc.requestCallback<_grpc_testing_StreamingInputCallResponse__Output>): grpc.ClientWritableStream<_grpc_testing_StreamingInputCallRequest>; /** * One request followed by a sequence of responses (streamed download). @@ -113,34 +114,34 @@ export interface TestServiceClient extends grpc.Client { /** * One request followed by one response. */ - UnaryCall(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_SimpleResponse__Output) => void): grpc.ClientUnaryCall; - UnaryCall(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _grpc_testing_SimpleResponse__Output) => void): grpc.ClientUnaryCall; - UnaryCall(argument: _grpc_testing_SimpleRequest, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_SimpleResponse__Output) => void): grpc.ClientUnaryCall; - UnaryCall(argument: _grpc_testing_SimpleRequest, callback: (error?: grpc.ServiceError, result?: _grpc_testing_SimpleResponse__Output) => void): grpc.ClientUnaryCall; + UnaryCall(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + UnaryCall(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + UnaryCall(argument: _grpc_testing_SimpleRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + UnaryCall(argument: _grpc_testing_SimpleRequest, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; /** * One request followed by one response. 
*/ - unaryCall(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_SimpleResponse__Output) => void): grpc.ClientUnaryCall; - unaryCall(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _grpc_testing_SimpleResponse__Output) => void): grpc.ClientUnaryCall; - unaryCall(argument: _grpc_testing_SimpleRequest, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_SimpleResponse__Output) => void): grpc.ClientUnaryCall; - unaryCall(argument: _grpc_testing_SimpleRequest, callback: (error?: grpc.ServiceError, result?: _grpc_testing_SimpleResponse__Output) => void): grpc.ClientUnaryCall; + unaryCall(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + unaryCall(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + unaryCall(argument: _grpc_testing_SimpleRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + unaryCall(argument: _grpc_testing_SimpleRequest, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; /** * The test server will not implement this method. It will be used * to test the behavior when clients call unimplemented methods. */ - UnimplementedCall(argument: _grpc_testing_Empty, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - UnimplementedCall(argument: _grpc_testing_Empty, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - UnimplementedCall(argument: _grpc_testing_Empty, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - UnimplementedCall(argument: _grpc_testing_Empty, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; + UnimplementedCall(argument: _grpc_testing_Empty, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + UnimplementedCall(argument: _grpc_testing_Empty, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + UnimplementedCall(argument: _grpc_testing_Empty, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + UnimplementedCall(argument: _grpc_testing_Empty, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; /** * The test server will not implement this method. It will be used * to test the behavior when clients call unimplemented methods. 
*/ - unimplementedCall(argument: _grpc_testing_Empty, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - unimplementedCall(argument: _grpc_testing_Empty, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - unimplementedCall(argument: _grpc_testing_Empty, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - unimplementedCall(argument: _grpc_testing_Empty, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; + unimplementedCall(argument: _grpc_testing_Empty, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + unimplementedCall(argument: _grpc_testing_Empty, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + unimplementedCall(argument: _grpc_testing_Empty, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + unimplementedCall(argument: _grpc_testing_Empty, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; } @@ -200,3 +201,14 @@ export interface TestServiceHandlers extends grpc.UntypedServiceImplementation { UnimplementedCall: grpc.handleUnaryCall<_grpc_testing_Empty__Output, _grpc_testing_Empty>; } + +export interface TestServiceDefinition extends grpc.ServiceDefinition { + CacheableUnaryCall: MethodDefinition<_grpc_testing_SimpleRequest, _grpc_testing_SimpleResponse, _grpc_testing_SimpleRequest__Output, _grpc_testing_SimpleResponse__Output> + EmptyCall: MethodDefinition<_grpc_testing_Empty, _grpc_testing_Empty, _grpc_testing_Empty__Output, _grpc_testing_Empty__Output> + FullDuplexCall: MethodDefinition<_grpc_testing_StreamingOutputCallRequest, _grpc_testing_StreamingOutputCallResponse, _grpc_testing_StreamingOutputCallRequest__Output, _grpc_testing_StreamingOutputCallResponse__Output> + HalfDuplexCall: MethodDefinition<_grpc_testing_StreamingOutputCallRequest, _grpc_testing_StreamingOutputCallResponse, _grpc_testing_StreamingOutputCallRequest__Output, _grpc_testing_StreamingOutputCallResponse__Output> + StreamingInputCall: MethodDefinition<_grpc_testing_StreamingInputCallRequest, _grpc_testing_StreamingInputCallResponse, _grpc_testing_StreamingInputCallRequest__Output, _grpc_testing_StreamingInputCallResponse__Output> + StreamingOutputCall: MethodDefinition<_grpc_testing_StreamingOutputCallRequest, _grpc_testing_StreamingOutputCallResponse, _grpc_testing_StreamingOutputCallRequest__Output, _grpc_testing_StreamingOutputCallResponse__Output> + UnaryCall: MethodDefinition<_grpc_testing_SimpleRequest, _grpc_testing_SimpleResponse, _grpc_testing_SimpleRequest__Output, _grpc_testing_SimpleResponse__Output> + UnimplementedCall: MethodDefinition<_grpc_testing_Empty, _grpc_testing_Empty, _grpc_testing_Empty__Output, _grpc_testing_Empty__Output> +} diff --git a/packages/grpc-js-xds/interop/generated/grpc/testing/UnimplementedService.ts b/packages/grpc-js-xds/interop/generated/grpc/testing/UnimplementedService.ts index d21dfcd0f..aea5d8b4a 100644 --- a/packages/grpc-js-xds/interop/generated/grpc/testing/UnimplementedService.ts +++ b/packages/grpc-js-xds/interop/generated/grpc/testing/UnimplementedService.ts @@ -1,6 +1,7 @@ // Original file: 
proto/grpc/testing/test.proto import type * as grpc from '@grpc/grpc-js' +import type { MethodDefinition } from '@grpc/proto-loader' import type { Empty as _grpc_testing_Empty, Empty__Output as _grpc_testing_Empty__Output } from '../../grpc/testing/Empty'; /** @@ -11,17 +12,17 @@ export interface UnimplementedServiceClient extends grpc.Client { /** * A call that no server should implement */ - UnimplementedCall(argument: _grpc_testing_Empty, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - UnimplementedCall(argument: _grpc_testing_Empty, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - UnimplementedCall(argument: _grpc_testing_Empty, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - UnimplementedCall(argument: _grpc_testing_Empty, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; + UnimplementedCall(argument: _grpc_testing_Empty, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + UnimplementedCall(argument: _grpc_testing_Empty, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + UnimplementedCall(argument: _grpc_testing_Empty, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + UnimplementedCall(argument: _grpc_testing_Empty, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; /** * A call that no server should implement */ - unimplementedCall(argument: _grpc_testing_Empty, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - unimplementedCall(argument: _grpc_testing_Empty, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - unimplementedCall(argument: _grpc_testing_Empty, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - unimplementedCall(argument: _grpc_testing_Empty, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; + unimplementedCall(argument: _grpc_testing_Empty, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + unimplementedCall(argument: _grpc_testing_Empty, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + unimplementedCall(argument: _grpc_testing_Empty, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + unimplementedCall(argument: _grpc_testing_Empty, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; } @@ -36,3 +37,7 @@ export interface UnimplementedServiceHandlers extends grpc.UntypedServiceImpleme UnimplementedCall: grpc.handleUnaryCall<_grpc_testing_Empty__Output, _grpc_testing_Empty>; } + +export interface UnimplementedServiceDefinition extends grpc.ServiceDefinition { + UnimplementedCall: MethodDefinition<_grpc_testing_Empty, 
_grpc_testing_Empty, _grpc_testing_Empty__Output, _grpc_testing_Empty__Output> +} diff --git a/packages/grpc-js-xds/interop/generated/grpc/testing/XdsUpdateClientConfigureService.ts b/packages/grpc-js-xds/interop/generated/grpc/testing/XdsUpdateClientConfigureService.ts index 22947619c..76826b812 100644 --- a/packages/grpc-js-xds/interop/generated/grpc/testing/XdsUpdateClientConfigureService.ts +++ b/packages/grpc-js-xds/interop/generated/grpc/testing/XdsUpdateClientConfigureService.ts @@ -1,6 +1,7 @@ // Original file: proto/grpc/testing/test.proto import type * as grpc from '@grpc/grpc-js' +import type { MethodDefinition } from '@grpc/proto-loader' import type { ClientConfigureRequest as _grpc_testing_ClientConfigureRequest, ClientConfigureRequest__Output as _grpc_testing_ClientConfigureRequest__Output } from '../../grpc/testing/ClientConfigureRequest'; import type { ClientConfigureResponse as _grpc_testing_ClientConfigureResponse, ClientConfigureResponse__Output as _grpc_testing_ClientConfigureResponse__Output } from '../../grpc/testing/ClientConfigureResponse'; @@ -11,17 +12,17 @@ export interface XdsUpdateClientConfigureServiceClient extends grpc.Client { /** * Update the tes client's configuration. */ - Configure(argument: _grpc_testing_ClientConfigureRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_ClientConfigureResponse__Output) => void): grpc.ClientUnaryCall; - Configure(argument: _grpc_testing_ClientConfigureRequest, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _grpc_testing_ClientConfigureResponse__Output) => void): grpc.ClientUnaryCall; - Configure(argument: _grpc_testing_ClientConfigureRequest, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_ClientConfigureResponse__Output) => void): grpc.ClientUnaryCall; - Configure(argument: _grpc_testing_ClientConfigureRequest, callback: (error?: grpc.ServiceError, result?: _grpc_testing_ClientConfigureResponse__Output) => void): grpc.ClientUnaryCall; + Configure(argument: _grpc_testing_ClientConfigureRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_ClientConfigureResponse__Output>): grpc.ClientUnaryCall; + Configure(argument: _grpc_testing_ClientConfigureRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_ClientConfigureResponse__Output>): grpc.ClientUnaryCall; + Configure(argument: _grpc_testing_ClientConfigureRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_ClientConfigureResponse__Output>): grpc.ClientUnaryCall; + Configure(argument: _grpc_testing_ClientConfigureRequest, callback: grpc.requestCallback<_grpc_testing_ClientConfigureResponse__Output>): grpc.ClientUnaryCall; /** * Update the tes client's configuration. 
*/ - configure(argument: _grpc_testing_ClientConfigureRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_ClientConfigureResponse__Output) => void): grpc.ClientUnaryCall; - configure(argument: _grpc_testing_ClientConfigureRequest, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _grpc_testing_ClientConfigureResponse__Output) => void): grpc.ClientUnaryCall; - configure(argument: _grpc_testing_ClientConfigureRequest, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_ClientConfigureResponse__Output) => void): grpc.ClientUnaryCall; - configure(argument: _grpc_testing_ClientConfigureRequest, callback: (error?: grpc.ServiceError, result?: _grpc_testing_ClientConfigureResponse__Output) => void): grpc.ClientUnaryCall; + configure(argument: _grpc_testing_ClientConfigureRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_ClientConfigureResponse__Output>): grpc.ClientUnaryCall; + configure(argument: _grpc_testing_ClientConfigureRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_ClientConfigureResponse__Output>): grpc.ClientUnaryCall; + configure(argument: _grpc_testing_ClientConfigureRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_ClientConfigureResponse__Output>): grpc.ClientUnaryCall; + configure(argument: _grpc_testing_ClientConfigureRequest, callback: grpc.requestCallback<_grpc_testing_ClientConfigureResponse__Output>): grpc.ClientUnaryCall; } @@ -35,3 +36,7 @@ export interface XdsUpdateClientConfigureServiceHandlers extends grpc.UntypedSer Configure: grpc.handleUnaryCall<_grpc_testing_ClientConfigureRequest__Output, _grpc_testing_ClientConfigureResponse>; } + +export interface XdsUpdateClientConfigureServiceDefinition extends grpc.ServiceDefinition { + Configure: MethodDefinition<_grpc_testing_ClientConfigureRequest, _grpc_testing_ClientConfigureResponse, _grpc_testing_ClientConfigureRequest__Output, _grpc_testing_ClientConfigureResponse__Output> +} diff --git a/packages/grpc-js-xds/interop/generated/grpc/testing/XdsUpdateHealthService.ts b/packages/grpc-js-xds/interop/generated/grpc/testing/XdsUpdateHealthService.ts index aa1e35dca..aa3d6e9c6 100644 --- a/packages/grpc-js-xds/interop/generated/grpc/testing/XdsUpdateHealthService.ts +++ b/packages/grpc-js-xds/interop/generated/grpc/testing/XdsUpdateHealthService.ts @@ -1,29 +1,30 @@ // Original file: proto/grpc/testing/test.proto import type * as grpc from '@grpc/grpc-js' +import type { MethodDefinition } from '@grpc/proto-loader' import type { Empty as _grpc_testing_Empty, Empty__Output as _grpc_testing_Empty__Output } from '../../grpc/testing/Empty'; /** * A service to remotely control health status of an xDS test server. 
*/ export interface XdsUpdateHealthServiceClient extends grpc.Client { - SetNotServing(argument: _grpc_testing_Empty, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - SetNotServing(argument: _grpc_testing_Empty, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - SetNotServing(argument: _grpc_testing_Empty, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - SetNotServing(argument: _grpc_testing_Empty, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - setNotServing(argument: _grpc_testing_Empty, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - setNotServing(argument: _grpc_testing_Empty, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - setNotServing(argument: _grpc_testing_Empty, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - setNotServing(argument: _grpc_testing_Empty, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; + SetNotServing(argument: _grpc_testing_Empty, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + SetNotServing(argument: _grpc_testing_Empty, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + SetNotServing(argument: _grpc_testing_Empty, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + SetNotServing(argument: _grpc_testing_Empty, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + setNotServing(argument: _grpc_testing_Empty, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + setNotServing(argument: _grpc_testing_Empty, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + setNotServing(argument: _grpc_testing_Empty, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + setNotServing(argument: _grpc_testing_Empty, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; - SetServing(argument: _grpc_testing_Empty, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - SetServing(argument: _grpc_testing_Empty, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - SetServing(argument: _grpc_testing_Empty, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - SetServing(argument: _grpc_testing_Empty, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - setServing(argument: _grpc_testing_Empty, metadata: 
grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - setServing(argument: _grpc_testing_Empty, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - setServing(argument: _grpc_testing_Empty, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; - setServing(argument: _grpc_testing_Empty, callback: (error?: grpc.ServiceError, result?: _grpc_testing_Empty__Output) => void): grpc.ClientUnaryCall; + SetServing(argument: _grpc_testing_Empty, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + SetServing(argument: _grpc_testing_Empty, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + SetServing(argument: _grpc_testing_Empty, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + SetServing(argument: _grpc_testing_Empty, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + setServing(argument: _grpc_testing_Empty, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + setServing(argument: _grpc_testing_Empty, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + setServing(argument: _grpc_testing_Empty, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; + setServing(argument: _grpc_testing_Empty, callback: grpc.requestCallback<_grpc_testing_Empty__Output>): grpc.ClientUnaryCall; } @@ -36,3 +37,8 @@ export interface XdsUpdateHealthServiceHandlers extends grpc.UntypedServiceImple SetServing: grpc.handleUnaryCall<_grpc_testing_Empty__Output, _grpc_testing_Empty>; } + +export interface XdsUpdateHealthServiceDefinition extends grpc.ServiceDefinition { + SetNotServing: MethodDefinition<_grpc_testing_Empty, _grpc_testing_Empty, _grpc_testing_Empty__Output, _grpc_testing_Empty__Output> + SetServing: MethodDefinition<_grpc_testing_Empty, _grpc_testing_Empty, _grpc_testing_Empty__Output, _grpc_testing_Empty__Output> +} diff --git a/packages/grpc-js-xds/interop/generated/test.ts b/packages/grpc-js-xds/interop/generated/test.ts index f91f0c970..722f8fe28 100644 --- a/packages/grpc-js-xds/interop/generated/test.ts +++ b/packages/grpc-js-xds/interop/generated/test.ts @@ -1,12 +1,12 @@ import type * as grpc from '@grpc/grpc-js'; -import type { ServiceDefinition, EnumTypeDefinition, MessageTypeDefinition } from '@grpc/proto-loader'; +import type { EnumTypeDefinition, MessageTypeDefinition } from '@grpc/proto-loader'; -import type { LoadBalancerStatsServiceClient as _grpc_testing_LoadBalancerStatsServiceClient } from './grpc/testing/LoadBalancerStatsService'; -import type { ReconnectServiceClient as _grpc_testing_ReconnectServiceClient } from './grpc/testing/ReconnectService'; -import type { TestServiceClient as _grpc_testing_TestServiceClient } from './grpc/testing/TestService'; -import type { UnimplementedServiceClient as _grpc_testing_UnimplementedServiceClient } from './grpc/testing/UnimplementedService'; -import type { XdsUpdateClientConfigureServiceClient as _grpc_testing_XdsUpdateClientConfigureServiceClient } from 
'./grpc/testing/XdsUpdateClientConfigureService'; -import type { XdsUpdateHealthServiceClient as _grpc_testing_XdsUpdateHealthServiceClient } from './grpc/testing/XdsUpdateHealthService'; +import type { LoadBalancerStatsServiceClient as _grpc_testing_LoadBalancerStatsServiceClient, LoadBalancerStatsServiceDefinition as _grpc_testing_LoadBalancerStatsServiceDefinition } from './grpc/testing/LoadBalancerStatsService'; +import type { ReconnectServiceClient as _grpc_testing_ReconnectServiceClient, ReconnectServiceDefinition as _grpc_testing_ReconnectServiceDefinition } from './grpc/testing/ReconnectService'; +import type { TestServiceClient as _grpc_testing_TestServiceClient, TestServiceDefinition as _grpc_testing_TestServiceDefinition } from './grpc/testing/TestService'; +import type { UnimplementedServiceClient as _grpc_testing_UnimplementedServiceClient, UnimplementedServiceDefinition as _grpc_testing_UnimplementedServiceDefinition } from './grpc/testing/UnimplementedService'; +import type { XdsUpdateClientConfigureServiceClient as _grpc_testing_XdsUpdateClientConfigureServiceClient, XdsUpdateClientConfigureServiceDefinition as _grpc_testing_XdsUpdateClientConfigureServiceDefinition } from './grpc/testing/XdsUpdateClientConfigureService'; +import type { XdsUpdateHealthServiceClient as _grpc_testing_XdsUpdateHealthServiceClient, XdsUpdateHealthServiceDefinition as _grpc_testing_XdsUpdateHealthServiceDefinition } from './grpc/testing/XdsUpdateHealthService'; type SubtypeConstructor<Constructor extends new (...args: any) => any, Subtype> = { new(...args: ConstructorParameters<Constructor>): Subtype; @@ -28,7 +28,7 @@ export interface ProtoGrpcType { /** * A service used to obtain stats for verifying LB behavior. */ - LoadBalancerStatsService: SubtypeConstructor<typeof _grpc_testing_LoadBalancerStatsServiceClient, _grpc_testing_LoadBalancerStatsServiceClient> & { service: ServiceDefinition } + LoadBalancerStatsService: SubtypeConstructor<typeof _grpc_testing_LoadBalancerStatsServiceClient, _grpc_testing_LoadBalancerStatsServiceClient> & { service: _grpc_testing_LoadBalancerStatsServiceDefinition } Payload: MessageTypeDefinition PayloadType: EnumTypeDefinition ReconnectInfo: MessageTypeDefinition @@ -36,7 +36,7 @@ export interface ProtoGrpcType { /** * A service used to control reconnect server. */ - ReconnectService: SubtypeConstructor<typeof _grpc_testing_ReconnectServiceClient, _grpc_testing_ReconnectServiceClient> & { service: ServiceDefinition } + ReconnectService: SubtypeConstructor<typeof _grpc_testing_ReconnectServiceClient, _grpc_testing_ReconnectServiceClient> & { service: _grpc_testing_ReconnectServiceDefinition } ResponseParameters: MessageTypeDefinition SimpleRequest: MessageTypeDefinition SimpleResponse: MessageTypeDefinition @@ -48,20 +48,20 @@ export interface ProtoGrpcType { * A simple service to test the various types of RPCs and experiment with * performance with various types of payload. */ - TestService: SubtypeConstructor<typeof _grpc_testing_TestServiceClient, _grpc_testing_TestServiceClient> & { service: ServiceDefinition } + TestService: SubtypeConstructor<typeof _grpc_testing_TestServiceClient, _grpc_testing_TestServiceClient> & { service: _grpc_testing_TestServiceDefinition } /** * A simple service NOT implemented at servers so clients can test for * that case. */ - UnimplementedService: SubtypeConstructor<typeof _grpc_testing_UnimplementedServiceClient, _grpc_testing_UnimplementedServiceClient> & { service: ServiceDefinition } + UnimplementedService: SubtypeConstructor<typeof _grpc_testing_UnimplementedServiceClient, _grpc_testing_UnimplementedServiceClient> & { service: _grpc_testing_UnimplementedServiceDefinition } /** * A service to dynamically update the configuration of an xDS test client. */ - XdsUpdateClientConfigureService: SubtypeConstructor<typeof _grpc_testing_XdsUpdateClientConfigureServiceClient, _grpc_testing_XdsUpdateClientConfigureServiceClient> & { service: ServiceDefinition } + XdsUpdateClientConfigureService: SubtypeConstructor<typeof _grpc_testing_XdsUpdateClientConfigureServiceClient, _grpc_testing_XdsUpdateClientConfigureServiceClient> & { service: _grpc_testing_XdsUpdateClientConfigureServiceDefinition } /** * A service to remotely control health status of an xDS test server.
*/ - XdsUpdateHealthService: SubtypeConstructor & { service: ServiceDefinition } + XdsUpdateHealthService: SubtypeConstructor & { service: _grpc_testing_XdsUpdateHealthServiceDefinition } } } } diff --git a/packages/grpc-js-xds/interop/xds-interop-client.ts b/packages/grpc-js-xds/interop/xds-interop-client.ts index 5f32831c1..a245ad09f 100644 --- a/packages/grpc-js-xds/interop/xds-interop-client.ts +++ b/packages/grpc-js-xds/interop/xds-interop-client.ts @@ -30,8 +30,98 @@ import { XdsUpdateClientConfigureServiceHandlers } from './generated/grpc/testin import { Empty__Output } from './generated/grpc/testing/Empty'; import { LoadBalancerAccumulatedStatsResponse } from './generated/grpc/testing/LoadBalancerAccumulatedStatsResponse'; +import TypedLoadBalancingConfig = grpc.experimental.TypedLoadBalancingConfig; +import LoadBalancer = grpc.experimental.LoadBalancer; +import ChannelControlHelper = grpc.experimental.ChannelControlHelper; +import ChildLoadBalancerHandler = grpc.experimental.ChildLoadBalancerHandler; +import Endpoint = grpc.experimental.Endpoint; +import Picker = grpc.experimental.Picker; +import PickArgs = grpc.experimental.PickArgs; +import PickResult = grpc.experimental.PickResult; +import PickResultType = grpc.experimental.PickResultType; +import createChildChannelControlHelper = grpc.experimental.createChildChannelControlHelper; +import parseLoadBalancingConfig = grpc.experimental.parseLoadBalancingConfig; + grpc_xds.register(); +const LB_POLICY_NAME = 'test.RpcBehaviorLoadBalancer'; + +class RpcBehaviorLoadBalancingConfig implements TypedLoadBalancingConfig { + constructor(private rpcBehavior: string) {} + getLoadBalancerName(): string { + return LB_POLICY_NAME; + } + toJsonObject(): object { + return { + [LB_POLICY_NAME]: { + 'rpcBehavior': this.rpcBehavior + } + }; + } + getRpcBehavior() { + return this.rpcBehavior; + } + static createFromJson(obj: any): RpcBehaviorLoadBalancingConfig { + if (!('rpcBehavior' in obj && typeof obj.rpcBehavior === 'string')) { + throw new Error(`${LB_POLICY_NAME} parsing error: expected string field rpcBehavior`); + } + return new RpcBehaviorLoadBalancingConfig(obj.rpcBehavior); + } +} + +class RpcBehaviorPicker implements Picker { + constructor(private wrappedPicker: Picker, private rpcBehavior: string) {} + pick(pickArgs: PickArgs): PickResult { + const wrappedPick = this.wrappedPicker.pick(pickArgs); + if (wrappedPick.pickResultType === PickResultType.COMPLETE) { + pickArgs.metadata.add('rpc-behavior', this.rpcBehavior); + } + return wrappedPick; + } +} + +const RPC_BEHAVIOR_CHILD_CONFIG = parseLoadBalancingConfig({round_robin: {}}); + +/** + * Load balancer implementation for Custom LB policy test + */ +class RpcBehaviorLoadBalancer implements LoadBalancer { + private child: ChildLoadBalancerHandler; + private latestConfig: RpcBehaviorLoadBalancingConfig | null = null; + constructor(channelControlHelper: ChannelControlHelper, options: grpc.ChannelOptions) { + const childChannelControlHelper = createChildChannelControlHelper(channelControlHelper, { + updateState: (connectivityState, picker) => { + if (connectivityState === grpc.connectivityState.READY && this.latestConfig) { + picker = new RpcBehaviorPicker(picker, this.latestConfig.getRpcBehavior()); + } + channelControlHelper.updateState(connectivityState, picker); + } + }); + this.child = new ChildLoadBalancerHandler(childChannelControlHelper, options); + } + updateAddressList(endpointList: Endpoint[], lbConfig: TypedLoadBalancingConfig, attributes: { [key: string]: unknown; }): void { 
+ if (!(lbConfig instanceof RpcBehaviorLoadBalancingConfig)) { + return; + } + this.latestConfig = lbConfig; + this.child.updateAddressList(endpointList, RPC_BEHAVIOR_CHILD_CONFIG, attributes); + } + exitIdle(): void { + this.child.exitIdle(); + } + resetBackoff(): void { + this.child.resetBackoff(); + } + destroy(): void { + this.child.destroy(); + } + getTypeName(): string { + return LB_POLICY_NAME; + } +} + +grpc.experimental.registerLoadBalancerType(LB_POLICY_NAME, RpcBehaviorLoadBalancer, RpcBehaviorLoadBalancingConfig); + const packageDefinition = protoLoader.loadSync('grpc/testing/test.proto', { keepCase: true, defaults: true, @@ -91,7 +181,7 @@ class CallSubscriber { } if (peerName in this.callsSucceededByPeer) { this.callsSucceededByPeer[peerName] += 1; - } else { + } else { this.callsSucceededByPeer[peerName] = 1; } this.callsSucceeded += 1; @@ -130,6 +220,13 @@ class CallStatsTracker { private subscribers: CallSubscriber[] = []; + private removeSubscriber(subscriber: CallSubscriber) { + const index = this.subscribers.indexOf(subscriber); + if (index >= 0) { + this.subscribers.splice(index, 1); + } + } + getCallStats(callCount: number, timeoutSec: number): Promise { return new Promise((resolve, reject) => { let finished = false; @@ -142,7 +239,7 @@ class CallStatsTracker { setTimeout(() => { if (!finished) { finished = true; - this.subscribers.splice(this.subscribers.indexOf(subscriber), 1); + this.removeSubscriber(subscriber); resolve(subscriber.getFinalStats()); } }, timeoutSec * 1000) @@ -155,7 +252,7 @@ class CallStatsTracker { for (const subscriber of callSubscribers) { subscriber.addCallStarted(); if (!subscriber.needsMoreCalls()) { - this.subscribers.splice(this.subscribers.indexOf(subscriber), 1); + this.removeSubscriber(subscriber); } } return { @@ -173,6 +270,27 @@ class CallStatsTracker { } } +class RecentTimestampList { + private timeList: bigint[] = []; + private nextIndex = 0; + + constructor(private readonly size: number) {} + + isFull() { + return this.timeList.length === this.size; + } + + insertTimestamp(timestamp: bigint) { + this.timeList[this.nextIndex] = timestamp; + this.nextIndex = (this.nextIndex + 1) % this.size; + } + + getSpan(): bigint { + const lastIndex = (this.nextIndex + this.size - 1) % this.size; + return this.timeList[lastIndex] - this.timeList[this.nextIndex]; + } +} + type CallType = 'EmptyCall' | 'UnaryCall'; interface ClientConfiguration { @@ -196,31 +314,103 @@ const currentConfig: ClientConfiguration = { let anyCallSucceeded = false; const accumulatedStats: LoadBalancerAccumulatedStatsResponse = { + num_rpcs_started_by_method: { + EMPTY_CALL: 0, + UNARY_CALL: 0 + }, + num_rpcs_succeeded_by_method: { + EMPTY_CALL: 0, + UNARY_CALL: 0 + }, + num_rpcs_failed_by_method: { + EMPTY_CALL: 0, + UNARY_CALL: 0 + }, stats_per_method: { - EmptyCall: { + EMPTY_CALL: { rpcs_started: 0, result: {} }, - UnaryCall: { + UNARY_CALL: { rpcs_started: 0, result: {} } } }; -function makeSingleRequest(client: TestServiceClient, type: CallType, failOnFailedRpcs: boolean, callStatsTracker: CallStatsTracker) { - const callTypeStats = accumulatedStats.stats_per_method![type]; - callTypeStats.rpcs_started! += 1; +function addAccumulatedCallStarted(callName: string) { + accumulatedStats.stats_per_method![callName].rpcs_started! 
+= 1; + accumulatedStats.num_rpcs_started_by_method![callName] += 1; +} + +function addAccumulatedCallEnded(callName: string, result: grpc.status) { + accumulatedStats.stats_per_method![callName].result![result] = (accumulatedStats.stats_per_method![callName].result![result] ?? 0) + 1; + if (result === grpc.status.OK) { + accumulatedStats.num_rpcs_succeeded_by_method![callName] += 1; + } else { + accumulatedStats.num_rpcs_failed_by_method![callName] += 1; + } +} + +const callTimeHistogram: {[callType: string]: {[status: number]: number[]}} = { + UnaryCall: {}, + EmptyCall: {} +} + +function renderHistogram(histogram: number[]): string { + const maxValue = Math.max(...histogram); + const maxIndexLength = `${histogram.length - 1}`.length; + const maxBarWidth = 60; + const result: string[] = []; + result.push('-'.repeat(maxIndexLength + maxBarWidth + 1)); + for (let i = 0; i < histogram.length; i++) { + result.push(`${' '.repeat(maxIndexLength - `${i}`.length)}${i}|${'█'.repeat(maxBarWidth * histogram[i] / maxValue)}`); + } + return result.join('\n'); +} + +function printAllHistograms() { + console.log('Call duration histograms'); + for (const callType in callTimeHistogram) { + console.log(callType); + const x = callTimeHistogram[callType]; + for (const statusCode in callTimeHistogram[callType]) { + console.log(`${statusCode} ${grpc.status[statusCode]}`); + console.log(renderHistogram(callTimeHistogram[callType][statusCode])); + } + } +} + +/** + * Timestamps output by process.hrtime.bigint() are a bigint number of + * nanoseconds. This is the representation of 1 second in that context. + */ +const TIMESTAMP_ONE_SECOND = BigInt(1e9); + +function makeSingleRequest(client: TestServiceClient, type: CallType, failOnFailedRpcs: boolean, callStatsTracker: CallStatsTracker, callStartTimestamps: RecentTimestampList) { + const callEnumName = callTypeEnumMapReverse[type]; + addAccumulatedCallStarted(callEnumName); const notifier = callStatsTracker.startCall(); let gotMetadata: boolean = false; let hostname: string | null = null; let completed: boolean = false; let completedWithError: boolean = false; + const startTime = process.hrtime.bigint(); const deadline = new Date(); deadline.setSeconds(deadline.getSeconds() + currentConfig.timeoutSec); - const callback = (error: grpc.ServiceError | undefined, value: Empty__Output | undefined) => { + const callback = (error: grpc.ServiceError | null, value: Empty__Output | undefined) => { const statusCode = error?.code ?? grpc.status.OK; - callTypeStats.result![statusCode] = (callTypeStats.result![statusCode] ?? 0) + 1; + const duration = process.hrtime.bigint() - startTime; + const durationSeconds = Number(duration / TIMESTAMP_ONE_SECOND) | 0; + if (!callTimeHistogram[type][statusCode]) { + callTimeHistogram[type][statusCode] = []; + } + if (callTimeHistogram[type][statusCode][durationSeconds]) { + callTimeHistogram[type][statusCode][durationSeconds] += 1; + } else { + callTimeHistogram[type][statusCode][durationSeconds] = 1; + } + addAccumulatedCallEnded(callEnumName, statusCode); if (error) { if (failOnFailedRpcs && anyCallSucceeded) { console.error('A call failed after a call succeeded'); @@ -253,15 +443,35 @@ function makeSingleRequest(client: TestServiceClient, type: CallType, failOnFail } } }); - + /* callStartTimestamps tracks the last N timestamps of started calls, where N + * is the target QPS. If the measured span of time between the first and last + * of those N calls is greater than 1 second, we make another call + * ~immediately to correct for that. 
*/ + callStartTimestamps.insertTimestamp(startTime); + if (callStartTimestamps.isFull()) { + if (callStartTimestamps.getSpan() > TIMESTAMP_ONE_SECOND) { + setImmediate(() => { + makeSingleRequest(client, type, failOnFailedRpcs, callStatsTracker, callStartTimestamps); + }); + } + } } function sendConstantQps(client: TestServiceClient, qps: number, failOnFailedRpcs: boolean, callStatsTracker: CallStatsTracker) { + const callStartTimestampsTrackers: {[callType: string]: RecentTimestampList} = {}; + for (const callType of ['EmptyCall', 'UnaryCall']) { + callStartTimestampsTrackers[callType] = new RecentTimestampList(qps); + } setInterval(() => { for (const callType of currentConfig.callTypes) { - makeSingleRequest(client, callType, failOnFailedRpcs, callStatsTracker); + makeSingleRequest(client, callType, failOnFailedRpcs, callStatsTracker, callStartTimestampsTrackers[callType]); } }, 1000/qps); + if (VERBOSITY >= 2) { + setInterval(() => { + console.log(`Accumulated stats: ${JSON.stringify(accumulatedStats, undefined, 2)}`); + }, 1000); + } } const callTypeEnumMap = { @@ -269,18 +479,27 @@ const callTypeEnumMap = { 'UNARY_CALL': 'UnaryCall' as CallType }; +const callTypeEnumMapReverse = { + 'EmptyCall': 'EMPTY_CALL', + 'UnaryCall': 'UNARY_CALL' +} + +const DEFAULT_TIMEOUT_SEC = 20; + function main() { const argv = yargs .string(['fail_on_failed_rpcs', 'server', 'stats_port', 'rpc', 'metadata']) - .number(['num_channels', 'qps']) + .number(['num_channels', 'qps', 'rpc_timeout_sec']) .demandOption(['server', 'stats_port']) .default('num_channels', 1) .default('qps', 1) .default('rpc', 'UnaryCall') .default('metadata', '') + .default('rpc_timeout_sec', DEFAULT_TIMEOUT_SEC) .argv; console.log('Starting xDS interop client. Args: ', argv); currentConfig.callTypes = argv.rpc.split(',').filter(value => value === 'EmptyCall' || value === 'UnaryCall') as CallType[]; + currentConfig.timeoutSec = argv.rpc_timeout_sec; for (const item of argv.metadata.split(',')) { const [method, key, value] = item.split(':'); if (value === undefined) { @@ -299,9 +518,9 @@ function main() { * channels do not share any subchannels. It does not have any * inherent function. 
*/ console.log(`Interop client channel ${i} starting sending ${argv.qps} QPS to ${argv.server}`); - sendConstantQps(new loadedProto.grpc.testing.TestService(argv.server, grpc.credentials.createInsecure(), {'unique': i}), - argv.qps, - argv.fail_on_failed_rpcs === 'true', + sendConstantQps(new loadedProto.grpc.testing.TestService(argv.server, grpc.credentials.createInsecure(), {'unique': i}), + argv.qps, + argv.fail_on_failed_rpcs === 'true', callStatsTracker); } @@ -316,23 +535,30 @@ function main() { }); }, GetClientAccumulatedStats: (call, callback) => { + console.log(`Sending accumulated stats response: ${JSON.stringify(accumulatedStats)}`); + printAllHistograms(); callback(null, accumulatedStats); } } const xdsUpdateClientConfigureServiceImpl: XdsUpdateClientConfigureServiceHandlers = { Configure: (call, callback) => { + console.log('Received new client configuration: ' + JSON.stringify(call.request, undefined, 2)); const callMetadata = { EmptyCall: new grpc.Metadata(), UnaryCall: new grpc.Metadata() - } + }; for (const metadataItem of call.request.metadata) { callMetadata[callTypeEnumMap[metadataItem.type]].add(metadataItem.key, metadataItem.value); } currentConfig.callTypes = call.request.types.map(value => callTypeEnumMap[value]); currentConfig.metadata = callMetadata; - currentConfig.timeoutSec = call.request.timeout_sec - console.log('Received new client configuration: ' + JSON.stringify(currentConfig, undefined, 2)); + if (call.request.timeout_sec > 0) { + currentConfig.timeoutSec = call.request.timeout_sec; + } else { + currentConfig.timeoutSec = DEFAULT_TIMEOUT_SEC; + } + console.log('Updated to new client configuration: ' + JSON.stringify(currentConfig, undefined, 2)); callback(null, {}); } } @@ -340,6 +566,7 @@ function main() { const server = new grpc.Server(); server.addService(loadedProto.grpc.testing.LoadBalancerStatsService.service, loadBalancerStatsServiceImpl); server.addService(loadedProto.grpc.testing.XdsUpdateClientConfigureService.service, xdsUpdateClientConfigureServiceImpl); + grpc.addAdminServicesToServer(server); server.bindAsync(`0.0.0.0:${argv.stats_port}`, grpc.ServerCredentials.createInsecure(), (error, port) => { if (error) { throw error; @@ -351,4 +578,4 @@ function main() { if (require.main === module) { main(); -} \ No newline at end of file +} diff --git a/packages/grpc-js-xds/package.json b/packages/grpc-js-xds/package.json index dfd778bd3..9e7d2f18a 100644 --- a/packages/grpc-js-xds/package.json +++ b/packages/grpc-js-xds/package.json @@ -1,6 +1,6 @@ { "name": "@grpc/grpc-js-xds", - "version": "1.3.1", + "version": "1.10.1", "description": "Plugin for @grpc/grpc-js. 
Adds the xds:// URL scheme and associated features.", "main": "build/src/index.js", "scripts": { @@ -9,11 +9,12 @@ "clean": "gts clean", "compile": "tsc", "fix": "gts fix", - "prepare": "npm run compile", + "prepare": "npm run generate-types && npm run compile", "pretest": "npm run compile", "posttest": "npm run check", - "generate-types": "proto-loader-gen-types --keepCase --longs String --enums String --defaults --oneofs --json --includeComments --includeDirs deps/envoy-api/ deps/udpa/ deps/googleapis/ deps/protoc-gen-validate/ -O src/generated/ --grpcLib @grpc/grpc-js envoy/service/discovery/v2/ads.proto envoy/service/load_stats/v2/lrs.proto envoy/api/v2/listener.proto envoy/api/v2/route.proto envoy/api/v2/cluster.proto envoy/api/v2/endpoint.proto envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto", - "generate-interop-types": "proto-loader-gen-types --keep-case --longs String --enums String --defaults --oneofs --json --includeComments --includeDirs proto/ -O interop/generated --grpcLib @grpc/grpc-js grpc/testing/test.proto" + "generate-types": "proto-loader-gen-types --keepCase --longs String --enums String --defaults --oneofs --includeComments --includeDirs deps/envoy-api/ deps/xds/ deps/googleapis/ deps/protoc-gen-validate/ -O src/generated/ --grpcLib @grpc/grpc-js envoy/service/discovery/v3/ads.proto envoy/service/load_stats/v3/lrs.proto envoy/config/listener/v3/listener.proto envoy/config/route/v3/route.proto envoy/config/cluster/v3/cluster.proto envoy/config/endpoint/v3/endpoint.proto envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto udpa/type/v1/typed_struct.proto xds/type/v3/typed_struct.proto envoy/extensions/filters/http/fault/v3/fault.proto envoy/service/status/v3/csds.proto envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.proto envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.proto envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.proto envoy/extensions/clusters/aggregate/v3/cluster.proto", + "generate-interop-types": "proto-loader-gen-types --keep-case --longs String --enums String --defaults --oneofs --json --includeComments --includeDirs proto/ -O interop/generated --grpcLib @grpc/grpc-js grpc/testing/test.proto", + "generate-test-types": "proto-loader-gen-types --keep-case --longs String --enums String --defaults --oneofs --json --includeComments --includeDirs proto/ -O test/generated --grpcLib @grpc/grpc-js grpc/testing/echo.proto" }, "repository": { "type": "git", @@ -37,17 +38,19 @@ "@types/mocha": "^5.2.6", "@types/node": "^13.11.1", "@types/yargs": "^15.0.5", - "gts": "^2.0.2", - "typescript": "^3.8.3", + "gts": "^5.0.1", + "typescript": "^4.9.5", "yargs": "^15.4.1" }, "dependencies": { - "@grpc/proto-loader": "^0.6.0", + "@grpc/proto-loader": "^0.7.13", "google-auth-library": "^7.0.2", - "re2-wasm": "^1.0.1" + "re2-wasm": "^1.0.1", + "vscode-uri": "^3.0.7", + "xxhash-wasm": "^1.0.2" }, "peerDependencies": { - "@grpc/grpc-js": "~1.3.0" + "@grpc/grpc-js": "~1.10.0" }, "engines": { "node": ">=10.10.0" @@ -55,15 +58,22 @@ "files": [ "src/**/*.ts", "build/src/**/*.{js,d.ts,js.map}", - "deps/envoy-api/envoy/api/v2/**/*.proto", + "deps/envoy-api/envoy/admin/v3/**/*.proto", "deps/envoy-api/envoy/config/**/*.proto", + "deps/envoy-api/envoy/data/**/*.proto", "deps/envoy-api/envoy/service/**/*.proto", "deps/envoy-api/envoy/type/**/*.proto", "deps/envoy-api/envoy/annotations/**/*.proto", + "deps/envoy-api/envoy/extensions/**/*.proto", 
"deps/googleapis/google/api/**/*.proto", "deps/googleapis/google/protobuf/**/*.proto", "deps/googleapis/google/rpc/**/*.proto", - "deps/udpa/udpa/annotations/**/*.proto", + "deps/protoc-gen-validate/**/*.proto", + "deps/xds/udpa/annotations/**/*.proto", + "deps/xds/udpa/type/**/*.proto", + "deps/xds/xds/annotations/**/*.proto", + "deps/xds/xds/core/**/*.proto", + "deps/xds/xds/type/**/*.proto", "deps/protoc-gen-validate/validate/**/*.proto" ] } diff --git a/packages/grpc-js-xds/proto/grpc/testing/echo.proto b/packages/grpc-js-xds/proto/grpc/testing/echo.proto new file mode 100644 index 000000000..7f444b43f --- /dev/null +++ b/packages/grpc-js-xds/proto/grpc/testing/echo.proto @@ -0,0 +1,70 @@ + +// Copyright 2015 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package grpc.testing; + +import "grpc/testing/echo_messages.proto"; +import "grpc/testing/simple_messages.proto"; + +service EchoTestService { + rpc Echo(EchoRequest) returns (EchoResponse); + rpc Echo1(EchoRequest) returns (EchoResponse); + rpc Echo2(EchoRequest) returns (EchoResponse); + rpc CheckDeadlineUpperBound(SimpleRequest) returns (StringValue); + rpc CheckDeadlineSet(SimpleRequest) returns (StringValue); + // A service which checks that the initial metadata sent over contains some + // expected key value pair + rpc CheckClientInitialMetadata(SimpleRequest) returns (SimpleResponse); + rpc RequestStream(stream EchoRequest) returns (EchoResponse); + rpc ResponseStream(EchoRequest) returns (stream EchoResponse); + rpc BidiStream(stream EchoRequest) returns (stream EchoResponse); + rpc Unimplemented(EchoRequest) returns (EchoResponse); + rpc UnimplementedBidi(stream EchoRequest) returns (stream EchoResponse); +} + +service EchoTest1Service { + rpc Echo(EchoRequest) returns (EchoResponse); + rpc Echo1(EchoRequest) returns (EchoResponse); + rpc Echo2(EchoRequest) returns (EchoResponse); + // A service which checks that the initial metadata sent over contains some + // expected key value pair + rpc CheckClientInitialMetadata(SimpleRequest) returns (SimpleResponse); + rpc RequestStream(stream EchoRequest) returns (EchoResponse); + rpc ResponseStream(EchoRequest) returns (stream EchoResponse); + rpc BidiStream(stream EchoRequest) returns (stream EchoResponse); + rpc Unimplemented(EchoRequest) returns (EchoResponse); +} + +service EchoTest2Service { + rpc Echo(EchoRequest) returns (EchoResponse); + rpc Echo1(EchoRequest) returns (EchoResponse); + rpc Echo2(EchoRequest) returns (EchoResponse); + // A service which checks that the initial metadata sent over contains some + // expected key value pair + rpc CheckClientInitialMetadata(SimpleRequest) returns (SimpleResponse); + rpc RequestStream(stream EchoRequest) returns (EchoResponse); + rpc ResponseStream(EchoRequest) returns (stream EchoResponse); + rpc BidiStream(stream EchoRequest) returns (stream EchoResponse); + rpc Unimplemented(EchoRequest) returns (EchoResponse); +} + +service UnimplementedEchoService { + rpc 
Unimplemented(EchoRequest) returns (EchoResponse); +} + +// A service without any rpc defined to test coverage. +service NoRpcService {} diff --git a/packages/grpc-js-xds/proto/grpc/testing/echo_messages.proto b/packages/grpc-js-xds/proto/grpc/testing/echo_messages.proto new file mode 100644 index 000000000..44f22133e --- /dev/null +++ b/packages/grpc-js-xds/proto/grpc/testing/echo_messages.proto @@ -0,0 +1,74 @@ + +// Copyright 2015 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package grpc.testing; + +option cc_enable_arenas = true; + +import "grpc/testing/xds/v3/orca_load_report.proto"; + +// Message to be echoed back serialized in trailer. +message DebugInfo { + repeated string stack_entries = 1; + string detail = 2; +} + +// Error status client expects to see. +message ErrorStatus { + int32 code = 1; + string error_message = 2; + string binary_error_details = 3; +} + +message RequestParams { + bool echo_deadline = 1; + int32 client_cancel_after_us = 2; + int32 server_cancel_after_us = 3; + bool echo_metadata = 4; + bool check_auth_context = 5; + int32 response_message_length = 6; + bool echo_peer = 7; + string expected_client_identity = 8; // will force check_auth_context. + bool skip_cancelled_check = 9; + string expected_transport_security_type = 10; + DebugInfo debug_info = 11; + bool server_die = 12; // Server should not see a request with this set. + string binary_error_details = 13; + ErrorStatus expected_error = 14; + int32 server_sleep_us = 15; // sleep when invoking server for deadline tests + int32 backend_channel_idx = 16; // which backend to send request to + bool echo_metadata_initially = 17; + bool server_notify_client_when_started = 18; + xds.data.orca.v3.OrcaLoadReport backend_metrics = 19; + bool echo_host_from_authority_header = 20; +} + +message EchoRequest { + string message = 1; + RequestParams param = 2; +} + +message ResponseParams { + int64 request_deadline = 1; + string host = 2; + string peer = 3; +} + +message EchoResponse { + string message = 1; + ResponseParams param = 2; +} diff --git a/packages/grpc-js-xds/proto/grpc/testing/simple_messages.proto b/packages/grpc-js-xds/proto/grpc/testing/simple_messages.proto new file mode 100644 index 000000000..3afe236b4 --- /dev/null +++ b/packages/grpc-js-xds/proto/grpc/testing/simple_messages.proto @@ -0,0 +1,26 @@ + +// Copyright 2018 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package grpc.testing; + +message SimpleRequest {} + +message SimpleResponse {} + +message StringValue { + string message = 1; +} diff --git a/packages/grpc-js-xds/proto/grpc/testing/xds/v3/orca_load_report.proto b/packages/grpc-js-xds/proto/grpc/testing/xds/v3/orca_load_report.proto new file mode 100644 index 000000000..033e64ba4 --- /dev/null +++ b/packages/grpc-js-xds/proto/grpc/testing/xds/v3/orca_load_report.proto @@ -0,0 +1,44 @@ +// Copyright 2020 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Local copy of Envoy xDS proto file, used for testing only. + +syntax = "proto3"; + +package xds.data.orca.v3; + +// See section `ORCA load report format` of the design document in +// :ref:`https://github.com/envoyproxy/envoy/issues/6614`. + +message OrcaLoadReport { + // CPU utilization expressed as a fraction of available CPU resources. This + // should be derived from the latest sample or measurement. + double cpu_utilization = 1; + + // Memory utilization expressed as a fraction of available memory + // resources. This should be derived from the latest sample or measurement. + double mem_utilization = 2; + + // Total RPS being served by an endpoint. This should cover all services that an endpoint is + // responsible for. + uint64 rps = 3; + + // Application specific requests costs. Each value is an absolute cost (e.g. 3487 bytes of + // storage) associated with the request. + map<string, double> request_cost = 4; + + // Resource utilization values. Each value is expressed as a fraction of total resources + // available, derived from the latest sample or measurement. + map<string, double> utilization = 5; +} diff --git a/packages/grpc-js-xds/scripts/psm-interop-build-node.sh b/packages/grpc-js-xds/scripts/psm-interop-build-node.sh new file mode 100755 index 000000000..d52206f0e --- /dev/null +++ b/packages/grpc-js-xds/scripts/psm-interop-build-node.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash +# Copyright 2024 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +set -eo pipefail + +####################################### +# Builds test app Docker images and pushes them to GCR. +# Called from psm_interop_kokoro_lib.sh.
+# +# Globals: +# SRC_DIR: Absolute path to the source repo on Kokoro VM +# SERVER_IMAGE_NAME: Test server Docker image name +# CLIENT_IMAGE_NAME: Test client Docker image name +# GIT_COMMIT: SHA-1 of git commit being built +# DOCKER_REGISTRY: Docker registry to push to +# Outputs: +# Writes the output of docker image build stdout, stderr +####################################### +psm::lang::build_docker_images() { + local client_dockerfile="packages/grpc-js-xds/interop/Dockerfile" + + cd "${SRC_DIR}" + psm::tools::run_verbose git submodule update --init --recursive + psm::tools::run_verbose git submodule status + + psm::build::docker_images_generic "${client_dockerfile}" +} diff --git a/packages/grpc-js-xds/scripts/psm-interop-test-node.sh b/packages/grpc-js-xds/scripts/psm-interop-test-node.sh new file mode 100755 index 000000000..169cf06f2 --- /dev/null +++ b/packages/grpc-js-xds/scripts/psm-interop-test-node.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash +# Copyright 2024 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +set -eo pipefail + +# Input parameters to psm:: methods of the install script. +readonly GRPC_LANGUAGE="node" +readonly BUILD_SCRIPT_DIR="$(dirname "$0")" + +# Used locally. +readonly TEST_DRIVER_INSTALL_SCRIPT_URL="https://raw.githubusercontent.com/${TEST_DRIVER_REPO_OWNER:-grpc}/psm-interop/${TEST_DRIVER_BRANCH:-main}/.kokoro/psm_interop_kokoro_lib.sh" + +psm::lang::source_install_lib() { + echo "Sourcing test driver install script from: ${TEST_DRIVER_INSTALL_SCRIPT_URL}" + local install_lib + # Download to a tmp file. + install_lib="$(mktemp -d)/psm_interop_kokoro_lib.sh" + curl -s --retry-connrefused --retry 5 -o "${install_lib}" "${TEST_DRIVER_INSTALL_SCRIPT_URL}" + # Checksum. + if command -v sha256sum &> /dev/null; then + echo "Install script checksum:" + sha256sum "${install_lib}" + fi + source "${install_lib}" +} + +psm::lang::source_install_lib +source "${BUILD_SCRIPT_DIR}/psm-interop-build-${GRPC_LANGUAGE}.sh" +psm::run "${PSM_TEST_SUITE}" diff --git a/packages/grpc-js-xds/scripts/xds-v3.sh b/packages/grpc-js-xds/scripts/xds-v3.sh new file mode 100755 index 000000000..103cbc429 --- /dev/null +++ b/packages/grpc-js-xds/scripts/xds-v3.sh @@ -0,0 +1,16 @@ +#!/bin/bash +# Copyright 2021 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +XDS_V3_OPT="--xds_v3_support" $(dirname $0)/xds.sh \ No newline at end of file diff --git a/packages/grpc-js-xds/scripts/xds.sh b/packages/grpc-js-xds/scripts/xds.sh old mode 100644 new mode 100755 index 714b6fff8..85124cdc1 --- a/packages/grpc-js-xds/scripts/xds.sh +++ b/packages/grpc-js-xds/scripts/xds.sh @@ -24,7 +24,7 @@ curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.33.4/install.sh | b # Load NVM . $NVM_DIR/nvm.sh -nvm install 12 +nvm install 16 set -exu -o pipefail [[ -f /VERSION ]] && cat /VERSION @@ -48,17 +48,21 @@ git clone -b master --single-branch --depth=1 https://github.com/grpc/grpc.git grpc/tools/run_tests/helper_scripts/prep_xds.sh -GRPC_NODE_TRACE=xds_client,xds_resolver,cds_balancer,eds_balancer,priority,weighted_target,round_robin,resolving_load_balancer,subchannel,keepalive,dns_resolver \ +mkdir -p "${KOKORO_ARTIFACTS_DIR}/github/grpc/reports" + +GRPC_NODE_TRACE=xds_client,xds_resolver,xds_cluster_manager,cds_balancer,xds_cluster_resolver,xds_cluster_impl,priority,weighted_target,round_robin,resolving_load_balancer,subchannel,keepalive,dns_resolver,fault_injection,http_filter,csds,outlier_detection,server,server_call \ GRPC_NODE_VERBOSITY=DEBUG \ NODE_XDS_INTEROP_VERBOSITY=1 \ python3 grpc/tools/run_tests/run_xds_tests.py \ - --test_case="all,path_matching,header_matching" \ + --test_case="ping_pong,circuit_breaking" \ --project_id=grpc-testing \ - --source_image=projects/grpc-testing/global/images/xds-test-server-2 \ + --source_image=projects/grpc-testing/global/images/xds-test-server-5 \ --path_to_server_binary=/java_server/grpc-java/interop-testing/build/install/grpc-interop-testing/bin/xds-test-server \ --gcp_suffix=$(date '+%s') \ --verbose \ - --client_cmd="$(which node) grpc-node/packages/grpc-js-xds/build/interop/xds-interop-client \ + --qps=75 \ + ${XDS_V3_OPT-} \ + --client_cmd="$(which node) --enable-source-maps --prof --logfile=${KOKORO_ARTIFACTS_DIR}/github/grpc/reports/prof.log grpc-node/packages/grpc-js-xds/build/interop/xds-interop-client \ --server=xds:///{server_uri} \ --stats_port={stats_port} \ --qps={qps} \ diff --git a/packages/grpc-js-xds/src/csds.ts b/packages/grpc-js-xds/src/csds.ts new file mode 100644 index 000000000..f9fac8569 --- /dev/null +++ b/packages/grpc-js-xds/src/csds.ts @@ -0,0 +1,145 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +import { ClientConfig, _envoy_service_status_v3_ClientConfig_GenericXdsConfig as GenericXdsConfig } from "./generated/envoy/service/status/v3/ClientConfig"; +import { ClientStatusDiscoveryServiceHandlers } from "./generated/envoy/service/status/v3/ClientStatusDiscoveryService"; +import { ClientStatusRequest__Output } from "./generated/envoy/service/status/v3/ClientStatusRequest"; +import { ClientStatusResponse } from "./generated/envoy/service/status/v3/ClientStatusResponse"; +import { Timestamp } from "./generated/google/protobuf/Timestamp"; +import { xdsResourceNameToString } from "./resources"; +import { sendUnaryData, ServerDuplexStream, ServerUnaryCall, status, experimental, loadPackageDefinition, logVerbosity } from '@grpc/grpc-js'; +import { loadSync } from "@grpc/proto-loader"; +import { ProtoGrpcType as CsdsProtoGrpcType } from "./generated/csds"; + +import registerAdminService = experimental.registerAdminService; +import { XdsClient } from "./xds-client"; + +const TRACER_NAME = 'csds'; + +function trace(text: string): void { + experimental.trace(logVerbosity.DEBUG, TRACER_NAME, text); +} + + +function dateToProtoTimestamp(date?: Date | null): Timestamp | null { + if (!date) { + return null; + } + const millisSinceEpoch = date.getTime(); + return { + seconds: (millisSinceEpoch / 1000) | 0, + nanos: (millisSinceEpoch % 1000) * 1_000_000 + } +} + +const registeredClients: XdsClient[] = []; + +export function registerXdsClientWithCsds(client: XdsClient) { + registeredClients.push(client); +} + +function getCurrentConfigList(): ClientConfig[] { + const result: ClientConfig[] = []; + for (const client of registeredClients) { + if (!client.adsNode) { + continue; + } + const genericConfigList: GenericXdsConfig[] = []; + for (const [authority, authorityState] of client.authorityStateMap) { + for (const [type, typeMap] of authorityState.resourceMap) { + for (const [key, resourceState] of typeMap) { + const typeUrl = type.getTypeUrl(); + const meta = resourceState.meta; + genericConfigList.push({ + name: xdsResourceNameToString({authority, key}, typeUrl), + type_url: typeUrl, + client_status: meta.clientStatus, + version_info: meta.version, + xds_config: meta.clientStatus === 'ACKED' ? meta.rawResource : undefined, + last_updated: meta.updateTime ? dateToProtoTimestamp(meta.updateTime) : undefined, + error_state: meta.clientStatus === 'NACKED' ? { + details: meta.failedDetails, + failed_configuration: meta.rawResource, + last_update_attempt: meta.failedUpdateTime ? 
dateToProtoTimestamp(meta.failedUpdateTime) : undefined, + version_info: meta.failedVersion + } : undefined + }); + } + } + } + result.push({ + node: client.adsNode, + generic_xds_configs: genericConfigList + }); + } + return result; +} + +const csdsImplementation: ClientStatusDiscoveryServiceHandlers = { + FetchClientStatus(call: ServerUnaryCall<ClientStatusRequest__Output, ClientStatusResponse>, callback: sendUnaryData<ClientStatusResponse>) { + const request = call.request; + if (request.node_matchers.length > 0) { + callback({ + code: status.INVALID_ARGUMENT, + details: 'Node matchers not supported' + }); + return; + } + callback(null, { + config: getCurrentConfigList() + }); + }, + StreamClientStatus(call: ServerDuplexStream<ClientStatusRequest__Output, ClientStatusResponse>) { + call.on('data', (request: ClientStatusRequest__Output) => { + if (request.node_matchers.length > 0) { + call.emit('error', { + code: status.INVALID_ARGUMENT, + details: 'Node matchers not supported' + }); + return; + } + call.write({ + config: getCurrentConfigList() + }); + }); + call.on('end', () => { + call.end(); + }); + } +} + +const loadedProto = loadSync('envoy/service/status/v3/csds.proto', { + keepCase: true, + longs: String, + enums: String, + defaults: true, + oneofs: true, + includeDirs: [ + // Paths are relative to src/build + __dirname + '/../../deps/envoy-api/', + __dirname + '/../../deps/xds/', + __dirname + '/../../deps/protoc-gen-validate/', + __dirname + '/../../deps/googleapis/' + ], +}); + +const csdsGrpcObject = loadPackageDefinition(loadedProto) as unknown as CsdsProtoGrpcType; +const csdsServiceDefinition = csdsGrpcObject.envoy.service.status.v3.ClientStatusDiscoveryService.service; + +export function setup() { + registerAdminService(() => csdsServiceDefinition, () => csdsImplementation); +} diff --git a/packages/grpc-js-xds/src/duration.ts b/packages/grpc-js-xds/src/duration.ts new file mode 100644 index 000000000..07f33651f --- /dev/null +++ b/packages/grpc-js-xds/src/duration.ts @@ -0,0 +1,33 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { experimental } from '@grpc/grpc-js'; +import { Duration__Output } from './generated/google/protobuf/Duration'; +import Duration = experimental.Duration; + +/** + * Convert a Duration protobuf message object to a Duration object as used in + * the ServiceConfig definition. The difference is that the protobuf message + * defines seconds as a long, which is represented as a string in JavaScript, + * and the one used in the service config defines it as a number. + * @param duration + */ +export function protoDurationToDuration(duration: Duration__Output): Duration { + return { + seconds: Number.parseInt(duration.seconds), + nanos: duration.nanos + }; +} diff --git a/packages/grpc-js-xds/src/environment.ts b/packages/grpc-js-xds/src/environment.ts index c2c7f2e05..e32d788a6 100644 --- a/packages/grpc-js-xds/src/environment.ts +++ b/packages/grpc-js-xds/src/environment.ts @@ -13,4 +13,15 @@ * See the License for the specific language governing permissions and * limitations under the License.
* - */ \ No newline at end of file + */ + +/* Switches to enable or disable experimental features. If the default is + * 'true', the feature is enabled by default, if the default is 'false' the + * feature is disabled by default. */ +export const EXPERIMENTAL_FAULT_INJECTION = (process.env.GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION ?? 'true') === 'true'; +export const EXPERIMENTAL_OUTLIER_DETECTION = (process.env.GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION ?? 'true') === 'true'; +export const EXPERIMENTAL_RETRY = (process.env.GRPC_XDS_EXPERIMENTAL_ENABLE_RETRY ?? 'true') === 'true'; +export const EXPERIMENTAL_FEDERATION = (process.env.GRPC_EXPERIMENTAL_XDS_FEDERATION ?? 'false') === 'true'; +export const EXPERIMENTAL_CUSTOM_LB_CONFIG = (process.env.GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG ?? 'true') === 'true'; +export const EXPERIMENTAL_RING_HASH = (process.env.GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH ?? 'true') === 'true'; +export const EXPERIMENTAL_PICK_FIRST = (process.env.GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG ?? 'false') === 'true'; diff --git a/packages/grpc-js-xds/src/fraction.ts b/packages/grpc-js-xds/src/fraction.ts new file mode 100644 index 000000000..709af72b2 --- /dev/null +++ b/packages/grpc-js-xds/src/fraction.ts @@ -0,0 +1,39 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import { FractionalPercent__Output } from "./generated/envoy/type/v3/FractionalPercent"; + +export interface Fraction { + numerator: number; + denominator: number; +} + +export function fractionToString(fraction: Fraction): string { + return `${fraction.numerator}/${fraction.denominator}`; +} + +const RUNTIME_FRACTION_DENOMINATOR_VALUES = { + HUNDRED: 100, + TEN_THOUSAND: 10_000, + MILLION: 1_000_000 +} + +export function envoyFractionToFraction(envoyFraction: FractionalPercent__Output): Fraction { + return { + numerator: envoyFraction.numerator, + denominator: RUNTIME_FRACTION_DENOMINATOR_VALUES[envoyFraction.denominator] + }; +} \ No newline at end of file diff --git a/packages/grpc-js-xds/src/generated/ads.ts b/packages/grpc-js-xds/src/generated/ads.ts index 0eacd1e34..d7483075c 100644 --- a/packages/grpc-js-xds/src/generated/ads.ts +++ b/packages/grpc-js-xds/src/generated/ads.ts @@ -1,7 +1,7 @@ import type * as grpc from '@grpc/grpc-js'; -import type { ServiceDefinition, EnumTypeDefinition, MessageTypeDefinition } from '@grpc/proto-loader'; +import type { EnumTypeDefinition, MessageTypeDefinition } from '@grpc/proto-loader'; -import type { AggregatedDiscoveryServiceClient as _envoy_service_discovery_v2_AggregatedDiscoveryServiceClient } from './envoy/service/discovery/v2/AggregatedDiscoveryService'; +import type { AggregatedDiscoveryServiceClient as _envoy_service_discovery_v3_AggregatedDiscoveryServiceClient, AggregatedDiscoveryServiceDefinition as _envoy_service_discovery_v3_AggregatedDiscoveryServiceDefinition } from './envoy/service/discovery/v3/AggregatedDiscoveryService'; type SubtypeConstructor any, Subtype> = { new(...args: ConstructorParameters): Subtype; @@ -9,14 +9,11 @@ type SubtypeConstructor any, Subtype> export interface ProtoGrpcType { envoy: { - api: { - v2: { - DeltaDiscoveryRequest: MessageTypeDefinition - DeltaDiscoveryResponse: MessageTypeDefinition - DiscoveryRequest: MessageTypeDefinition - DiscoveryResponse: MessageTypeDefinition - Resource: MessageTypeDefinition - core: { + annotations: { + } + config: { + core: { + v3: { Address: MessageTypeDefinition AsyncDataSource: MessageTypeDefinition BackoffStrategy: MessageTypeDefinition @@ -25,7 +22,9 @@ export interface ProtoGrpcType { CidrRange: MessageTypeDefinition ControlPlane: MessageTypeDefinition DataSource: MessageTypeDefinition + EnvoyInternalAddress: MessageTypeDefinition Extension: MessageTypeDefinition + ExtraSourceAddress: MessageTypeDefinition HeaderMap: MessageTypeDefinition HeaderValue: MessageTypeDefinition HeaderValueOption: MessageTypeDefinition @@ -34,6 +33,7 @@ export interface ProtoGrpcType { Metadata: MessageTypeDefinition Node: MessageTypeDefinition Pipe: MessageTypeDefinition + QueryParameter: MessageTypeDefinition RemoteDataSource: MessageTypeDefinition RequestMethod: EnumTypeDefinition RetryPolicy: MessageTypeDefinition @@ -41,35 +41,48 @@ export interface ProtoGrpcType { RuntimeDouble: MessageTypeDefinition RuntimeFeatureFlag: MessageTypeDefinition RuntimeFractionalPercent: MessageTypeDefinition + RuntimePercent: MessageTypeDefinition RuntimeUInt32: MessageTypeDefinition SocketAddress: MessageTypeDefinition SocketOption: MessageTypeDefinition + SocketOptionsOverride: MessageTypeDefinition TcpKeepalive: MessageTypeDefinition TrafficDirection: EnumTypeDefinition TransportSocket: MessageTypeDefinition + WatchedDirectory: MessageTypeDefinition } } } service: { discovery: { - v2: { + v3: { AdsDummy: MessageTypeDefinition /** - * See https://github.com/lyft/envoy-api#apis for a 
description of the role of + * See https://github.com/envoyproxy/envoy-api#apis for a description of the role of * ADS and how it is intended to be used by a management server. ADS requests * have the same structure as their singleton xDS counterparts, but can * multiplex many resource types on a single stream. The type_url in the * DiscoveryRequest/DiscoveryResponse provides sufficient information to recover * the multiplexed singleton APIs at the Envoy instance and management server. */ - AggregatedDiscoveryService: SubtypeConstructor & { service: ServiceDefinition } + AggregatedDiscoveryService: SubtypeConstructor & { service: _envoy_service_discovery_v3_AggregatedDiscoveryServiceDefinition } + DeltaDiscoveryRequest: MessageTypeDefinition + DeltaDiscoveryResponse: MessageTypeDefinition + DiscoveryRequest: MessageTypeDefinition + DiscoveryResponse: MessageTypeDefinition + DynamicParameterConstraints: MessageTypeDefinition + Resource: MessageTypeDefinition + ResourceLocator: MessageTypeDefinition + ResourceName: MessageTypeDefinition } } } type: { - FractionalPercent: MessageTypeDefinition - Percent: MessageTypeDefinition - SemanticVersion: MessageTypeDefinition + v3: { + FractionalPercent: MessageTypeDefinition + Percent: MessageTypeDefinition + SemanticVersion: MessageTypeDefinition + } } } google: { @@ -122,6 +135,7 @@ export interface ProtoGrpcType { MigrateAnnotation: MessageTypeDefinition PackageVersionStatus: EnumTypeDefinition StatusAnnotation: MessageTypeDefinition + VersioningAnnotation: MessageTypeDefinition } } validate: { @@ -150,5 +164,21 @@ export interface ProtoGrpcType { UInt32Rules: MessageTypeDefinition UInt64Rules: MessageTypeDefinition } + xds: { + annotations: { + v3: { + FieldStatusAnnotation: MessageTypeDefinition + FileStatusAnnotation: MessageTypeDefinition + MessageStatusAnnotation: MessageTypeDefinition + PackageVersionStatus: EnumTypeDefinition + StatusAnnotation: MessageTypeDefinition + } + } + core: { + v3: { + ContextParams: MessageTypeDefinition + } + } + } } diff --git a/packages/grpc-js-xds/src/generated/cluster.ts b/packages/grpc-js-xds/src/generated/cluster.ts index b7005a3f0..6e8c5f985 100644 --- a/packages/grpc-js-xds/src/generated/cluster.ts +++ b/packages/grpc-js-xds/src/generated/cluster.ts @@ -1,5 +1,5 @@ import type * as grpc from '@grpc/grpc-js'; -import type { ServiceDefinition, EnumTypeDefinition, MessageTypeDefinition } from '@grpc/proto-loader'; +import type { EnumTypeDefinition, MessageTypeDefinition } from '@grpc/proto-loader'; type SubtypeConstructor any, Subtype> = { @@ -10,34 +10,24 @@ export interface ProtoGrpcType { envoy: { annotations: { } - api: { - v2: { - Cluster: MessageTypeDefinition - ClusterLoadAssignment: MessageTypeDefinition - LoadBalancingPolicy: MessageTypeDefinition - UpstreamBindConfig: MessageTypeDefinition - UpstreamConnectionOptions: MessageTypeDefinition - auth: { - CertificateValidationContext: MessageTypeDefinition - CommonTlsContext: MessageTypeDefinition - DownstreamTlsContext: MessageTypeDefinition - GenericSecret: MessageTypeDefinition - PrivateKeyProvider: MessageTypeDefinition - SdsSecretConfig: MessageTypeDefinition - Secret: MessageTypeDefinition - TlsCertificate: MessageTypeDefinition - TlsParameters: MessageTypeDefinition - TlsSessionTicketKeys: MessageTypeDefinition - UpstreamTlsContext: MessageTypeDefinition - } - cluster: { + config: { + cluster: { + v3: { CircuitBreakers: MessageTypeDefinition + Cluster: MessageTypeDefinition + ClusterCollection: MessageTypeDefinition Filter: MessageTypeDefinition 
+ LoadBalancingPolicy: MessageTypeDefinition OutlierDetection: MessageTypeDefinition + TrackClusterStats: MessageTypeDefinition + UpstreamConnectionOptions: MessageTypeDefinition } - core: { + } + core: { + v3: { Address: MessageTypeDefinition AggregatedConfigSource: MessageTypeDefinition + AlternateProtocolsCacheOptions: MessageTypeDefinition ApiConfigSource: MessageTypeDefinition ApiVersion: EnumTypeDefinition AsyncDataSource: MessageTypeDefinition @@ -48,8 +38,13 @@ export interface ProtoGrpcType { ConfigSource: MessageTypeDefinition ControlPlane: MessageTypeDefinition DataSource: MessageTypeDefinition + DnsResolutionConfig: MessageTypeDefinition + DnsResolverOptions: MessageTypeDefinition + EnvoyInternalAddress: MessageTypeDefinition EventServiceConfig: MessageTypeDefinition Extension: MessageTypeDefinition + ExtensionConfigSource: MessageTypeDefinition + ExtraSourceAddress: MessageTypeDefinition GrpcProtocolOptions: MessageTypeDefinition GrpcService: MessageTypeDefinition HeaderMap: MessageTypeDefinition @@ -57,14 +52,21 @@ export interface ProtoGrpcType { HeaderValueOption: MessageTypeDefinition HealthCheck: MessageTypeDefinition HealthStatus: EnumTypeDefinition + HealthStatusSet: MessageTypeDefinition Http1ProtocolOptions: MessageTypeDefinition Http2ProtocolOptions: MessageTypeDefinition + Http3ProtocolOptions: MessageTypeDefinition HttpProtocolOptions: MessageTypeDefinition HttpUri: MessageTypeDefinition + KeepaliveSettings: MessageTypeDefinition Locality: MessageTypeDefinition Metadata: MessageTypeDefinition Node: MessageTypeDefinition + PathConfigSource: MessageTypeDefinition Pipe: MessageTypeDefinition + QueryParameter: MessageTypeDefinition + QuicKeepAliveSettings: MessageTypeDefinition + QuicProtocolOptions: MessageTypeDefinition RateLimitSettings: MessageTypeDefinition RemoteDataSource: MessageTypeDefinition RequestMethod: EnumTypeDefinition @@ -73,45 +75,62 @@ export interface ProtoGrpcType { RuntimeDouble: MessageTypeDefinition RuntimeFeatureFlag: MessageTypeDefinition RuntimeFractionalPercent: MessageTypeDefinition + RuntimePercent: MessageTypeDefinition RuntimeUInt32: MessageTypeDefinition + SchemeHeaderTransformation: MessageTypeDefinition SelfConfigSource: MessageTypeDefinition SocketAddress: MessageTypeDefinition SocketOption: MessageTypeDefinition + SocketOptionsOverride: MessageTypeDefinition TcpKeepalive: MessageTypeDefinition TcpProtocolOptions: MessageTypeDefinition TrafficDirection: EnumTypeDefinition TransportSocket: MessageTypeDefinition + TypedExtensionConfig: MessageTypeDefinition UpstreamHttpProtocolOptions: MessageTypeDefinition + WatchedDirectory: MessageTypeDefinition } - endpoint: { + } + endpoint: { + v3: { + ClusterLoadAssignment: MessageTypeDefinition Endpoint: MessageTypeDefinition LbEndpoint: MessageTypeDefinition + LedsClusterLocalityConfig: MessageTypeDefinition LocalityLbEndpoints: MessageTypeDefinition } } } + extensions: { + clusters: { + aggregate: { + v3: { + ClusterConfig: MessageTypeDefinition + } + } + } + } type: { - CodecClientType: EnumTypeDefinition - DoubleRange: MessageTypeDefinition - FractionalPercent: MessageTypeDefinition - Int32Range: MessageTypeDefinition - Int64Range: MessageTypeDefinition - Percent: MessageTypeDefinition - SemanticVersion: MessageTypeDefinition matcher: { - ListStringMatcher: MessageTypeDefinition - RegexMatchAndSubstitute: MessageTypeDefinition - RegexMatcher: MessageTypeDefinition - StringMatcher: MessageTypeDefinition + v3: { + ListStringMatcher: MessageTypeDefinition + RegexMatchAndSubstitute: 
MessageTypeDefinition + RegexMatcher: MessageTypeDefinition + StringMatcher: MessageTypeDefinition + } + } + v3: { + CodecClientType: EnumTypeDefinition + DoubleRange: MessageTypeDefinition + FractionalPercent: MessageTypeDefinition + Int32Range: MessageTypeDefinition + Int64Range: MessageTypeDefinition + Percent: MessageTypeDefinition + SemanticVersion: MessageTypeDefinition } } } google: { - api: { - CustomHttpPattern: MessageTypeDefinition - Http: MessageTypeDefinition - HttpRule: MessageTypeDefinition - } protobuf: { Any: MessageTypeDefinition BoolValue: MessageTypeDefinition @@ -155,10 +174,12 @@ export interface ProtoGrpcType { udpa: { annotations: { FieldMigrateAnnotation: MessageTypeDefinition + FieldSecurityAnnotation: MessageTypeDefinition FileMigrateAnnotation: MessageTypeDefinition MigrateAnnotation: MessageTypeDefinition PackageVersionStatus: EnumTypeDefinition StatusAnnotation: MessageTypeDefinition + VersioningAnnotation: MessageTypeDefinition } } validate: { @@ -187,5 +208,24 @@ export interface ProtoGrpcType { UInt32Rules: MessageTypeDefinition UInt64Rules: MessageTypeDefinition } + xds: { + annotations: { + v3: { + FieldStatusAnnotation: MessageTypeDefinition + FileStatusAnnotation: MessageTypeDefinition + MessageStatusAnnotation: MessageTypeDefinition + PackageVersionStatus: EnumTypeDefinition + StatusAnnotation: MessageTypeDefinition + } + } + core: { + v3: { + Authority: MessageTypeDefinition + CollectionEntry: MessageTypeDefinition + ContextParams: MessageTypeDefinition + ResourceLocator: MessageTypeDefinition + } + } + } } diff --git a/packages/grpc-js-xds/src/generated/csds.ts b/packages/grpc-js-xds/src/generated/csds.ts new file mode 100644 index 000000000..e09151f50 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/csds.ts @@ -0,0 +1,208 @@ +import type * as grpc from '@grpc/grpc-js'; +import type { EnumTypeDefinition, MessageTypeDefinition } from '@grpc/proto-loader'; + +import type { ClientStatusDiscoveryServiceClient as _envoy_service_status_v3_ClientStatusDiscoveryServiceClient, ClientStatusDiscoveryServiceDefinition as _envoy_service_status_v3_ClientStatusDiscoveryServiceDefinition } from './envoy/service/status/v3/ClientStatusDiscoveryService'; + +type SubtypeConstructor any, Subtype> = { + new(...args: ConstructorParameters): Subtype; +}; + +export interface ProtoGrpcType { + envoy: { + admin: { + v3: { + ClientResourceStatus: EnumTypeDefinition + ClustersConfigDump: MessageTypeDefinition + EcdsConfigDump: MessageTypeDefinition + EndpointsConfigDump: MessageTypeDefinition + ListenersConfigDump: MessageTypeDefinition + RoutesConfigDump: MessageTypeDefinition + ScopedRoutesConfigDump: MessageTypeDefinition + UpdateFailureState: MessageTypeDefinition + } + } + annotations: { + } + config: { + core: { + v3: { + Address: MessageTypeDefinition + AsyncDataSource: MessageTypeDefinition + BackoffStrategy: MessageTypeDefinition + BindConfig: MessageTypeDefinition + BuildVersion: MessageTypeDefinition + CidrRange: MessageTypeDefinition + ControlPlane: MessageTypeDefinition + DataSource: MessageTypeDefinition + EnvoyInternalAddress: MessageTypeDefinition + Extension: MessageTypeDefinition + ExtraSourceAddress: MessageTypeDefinition + HeaderMap: MessageTypeDefinition + HeaderValue: MessageTypeDefinition + HeaderValueOption: MessageTypeDefinition + HttpUri: MessageTypeDefinition + Locality: MessageTypeDefinition + Metadata: MessageTypeDefinition + Node: MessageTypeDefinition + Pipe: MessageTypeDefinition + QueryParameter: MessageTypeDefinition + RemoteDataSource: 
MessageTypeDefinition + RequestMethod: EnumTypeDefinition + RetryPolicy: MessageTypeDefinition + RoutingPriority: EnumTypeDefinition + RuntimeDouble: MessageTypeDefinition + RuntimeFeatureFlag: MessageTypeDefinition + RuntimeFractionalPercent: MessageTypeDefinition + RuntimePercent: MessageTypeDefinition + RuntimeUInt32: MessageTypeDefinition + SocketAddress: MessageTypeDefinition + SocketOption: MessageTypeDefinition + SocketOptionsOverride: MessageTypeDefinition + TcpKeepalive: MessageTypeDefinition + TrafficDirection: EnumTypeDefinition + TransportSocket: MessageTypeDefinition + WatchedDirectory: MessageTypeDefinition + } + } + } + service: { + status: { + v3: { + ClientConfig: MessageTypeDefinition + ClientConfigStatus: EnumTypeDefinition + /** + * CSDS is Client Status Discovery Service. It can be used to get the status of + * an xDS-compliant client from the management server's point of view. It can + * also be used to get the current xDS states directly from the client. + */ + ClientStatusDiscoveryService: SubtypeConstructor & { service: _envoy_service_status_v3_ClientStatusDiscoveryServiceDefinition } + ClientStatusRequest: MessageTypeDefinition + ClientStatusResponse: MessageTypeDefinition + ConfigStatus: EnumTypeDefinition + PerXdsConfig: MessageTypeDefinition + } + } + } + type: { + matcher: { + v3: { + DoubleMatcher: MessageTypeDefinition + ListMatcher: MessageTypeDefinition + ListStringMatcher: MessageTypeDefinition + NodeMatcher: MessageTypeDefinition + RegexMatchAndSubstitute: MessageTypeDefinition + RegexMatcher: MessageTypeDefinition + StringMatcher: MessageTypeDefinition + StructMatcher: MessageTypeDefinition + ValueMatcher: MessageTypeDefinition + } + } + v3: { + DoubleRange: MessageTypeDefinition + FractionalPercent: MessageTypeDefinition + Int32Range: MessageTypeDefinition + Int64Range: MessageTypeDefinition + Percent: MessageTypeDefinition + SemanticVersion: MessageTypeDefinition + } + } + } + google: { + api: { + CustomHttpPattern: MessageTypeDefinition + Http: MessageTypeDefinition + HttpRule: MessageTypeDefinition + } + protobuf: { + Any: MessageTypeDefinition + BoolValue: MessageTypeDefinition + BytesValue: MessageTypeDefinition + DescriptorProto: MessageTypeDefinition + DoubleValue: MessageTypeDefinition + Duration: MessageTypeDefinition + EnumDescriptorProto: MessageTypeDefinition + EnumOptions: MessageTypeDefinition + EnumValueDescriptorProto: MessageTypeDefinition + EnumValueOptions: MessageTypeDefinition + FieldDescriptorProto: MessageTypeDefinition + FieldOptions: MessageTypeDefinition + FileDescriptorProto: MessageTypeDefinition + FileDescriptorSet: MessageTypeDefinition + FileOptions: MessageTypeDefinition + FloatValue: MessageTypeDefinition + GeneratedCodeInfo: MessageTypeDefinition + Int32Value: MessageTypeDefinition + Int64Value: MessageTypeDefinition + ListValue: MessageTypeDefinition + MessageOptions: MessageTypeDefinition + MethodDescriptorProto: MessageTypeDefinition + MethodOptions: MessageTypeDefinition + NullValue: EnumTypeDefinition + OneofDescriptorProto: MessageTypeDefinition + OneofOptions: MessageTypeDefinition + ServiceDescriptorProto: MessageTypeDefinition + ServiceOptions: MessageTypeDefinition + SourceCodeInfo: MessageTypeDefinition + StringValue: MessageTypeDefinition + Struct: MessageTypeDefinition + Timestamp: MessageTypeDefinition + UInt32Value: MessageTypeDefinition + UInt64Value: MessageTypeDefinition + UninterpretedOption: MessageTypeDefinition + Value: MessageTypeDefinition + } + } + udpa: { + annotations: { + 
FieldMigrateAnnotation: MessageTypeDefinition + FileMigrateAnnotation: MessageTypeDefinition + MigrateAnnotation: MessageTypeDefinition + PackageVersionStatus: EnumTypeDefinition + StatusAnnotation: MessageTypeDefinition + VersioningAnnotation: MessageTypeDefinition + } + } + validate: { + AnyRules: MessageTypeDefinition + BoolRules: MessageTypeDefinition + BytesRules: MessageTypeDefinition + DoubleRules: MessageTypeDefinition + DurationRules: MessageTypeDefinition + EnumRules: MessageTypeDefinition + FieldRules: MessageTypeDefinition + Fixed32Rules: MessageTypeDefinition + Fixed64Rules: MessageTypeDefinition + FloatRules: MessageTypeDefinition + Int32Rules: MessageTypeDefinition + Int64Rules: MessageTypeDefinition + KnownRegex: EnumTypeDefinition + MapRules: MessageTypeDefinition + MessageRules: MessageTypeDefinition + RepeatedRules: MessageTypeDefinition + SFixed32Rules: MessageTypeDefinition + SFixed64Rules: MessageTypeDefinition + SInt32Rules: MessageTypeDefinition + SInt64Rules: MessageTypeDefinition + StringRules: MessageTypeDefinition + TimestampRules: MessageTypeDefinition + UInt32Rules: MessageTypeDefinition + UInt64Rules: MessageTypeDefinition + } + xds: { + annotations: { + v3: { + FieldStatusAnnotation: MessageTypeDefinition + FileStatusAnnotation: MessageTypeDefinition + MessageStatusAnnotation: MessageTypeDefinition + PackageVersionStatus: EnumTypeDefinition + StatusAnnotation: MessageTypeDefinition + } + } + core: { + v3: { + ContextParams: MessageTypeDefinition + } + } + } +} + diff --git a/packages/grpc-js-xds/src/generated/endpoint.ts b/packages/grpc-js-xds/src/generated/endpoint.ts index 33d5872ef..4fcf914e3 100644 --- a/packages/grpc-js-xds/src/generated/endpoint.ts +++ b/packages/grpc-js-xds/src/generated/endpoint.ts @@ -1,5 +1,5 @@ import type * as grpc from '@grpc/grpc-js'; -import type { ServiceDefinition, EnumTypeDefinition, MessageTypeDefinition } from '@grpc/proto-loader'; +import type { EnumTypeDefinition, MessageTypeDefinition } from '@grpc/proto-loader'; type SubtypeConstructor any, Subtype> = { @@ -10,31 +10,41 @@ export interface ProtoGrpcType { envoy: { annotations: { } - api: { - v2: { - ClusterLoadAssignment: MessageTypeDefinition - core: { + config: { + core: { + v3: { Address: MessageTypeDefinition + AggregatedConfigSource: MessageTypeDefinition + ApiConfigSource: MessageTypeDefinition + ApiVersion: EnumTypeDefinition AsyncDataSource: MessageTypeDefinition BackoffStrategy: MessageTypeDefinition BindConfig: MessageTypeDefinition BuildVersion: MessageTypeDefinition CidrRange: MessageTypeDefinition + ConfigSource: MessageTypeDefinition ControlPlane: MessageTypeDefinition DataSource: MessageTypeDefinition + EnvoyInternalAddress: MessageTypeDefinition EventServiceConfig: MessageTypeDefinition Extension: MessageTypeDefinition + ExtensionConfigSource: MessageTypeDefinition + ExtraSourceAddress: MessageTypeDefinition GrpcService: MessageTypeDefinition HeaderMap: MessageTypeDefinition HeaderValue: MessageTypeDefinition HeaderValueOption: MessageTypeDefinition HealthCheck: MessageTypeDefinition HealthStatus: EnumTypeDefinition + HealthStatusSet: MessageTypeDefinition HttpUri: MessageTypeDefinition Locality: MessageTypeDefinition Metadata: MessageTypeDefinition Node: MessageTypeDefinition + PathConfigSource: MessageTypeDefinition Pipe: MessageTypeDefinition + QueryParameter: MessageTypeDefinition + RateLimitSettings: MessageTypeDefinition RemoteDataSource: MessageTypeDefinition RequestMethod: EnumTypeDefinition RetryPolicy: MessageTypeDefinition @@ -42,42 +52,50 
@@ export interface ProtoGrpcType { RuntimeDouble: MessageTypeDefinition RuntimeFeatureFlag: MessageTypeDefinition RuntimeFractionalPercent: MessageTypeDefinition + RuntimePercent: MessageTypeDefinition RuntimeUInt32: MessageTypeDefinition + SelfConfigSource: MessageTypeDefinition SocketAddress: MessageTypeDefinition SocketOption: MessageTypeDefinition + SocketOptionsOverride: MessageTypeDefinition TcpKeepalive: MessageTypeDefinition TrafficDirection: EnumTypeDefinition TransportSocket: MessageTypeDefinition + TypedExtensionConfig: MessageTypeDefinition + WatchedDirectory: MessageTypeDefinition } - endpoint: { + } + endpoint: { + v3: { + ClusterLoadAssignment: MessageTypeDefinition Endpoint: MessageTypeDefinition LbEndpoint: MessageTypeDefinition + LedsClusterLocalityConfig: MessageTypeDefinition LocalityLbEndpoints: MessageTypeDefinition } } } type: { - CodecClientType: EnumTypeDefinition - DoubleRange: MessageTypeDefinition - FractionalPercent: MessageTypeDefinition - Int32Range: MessageTypeDefinition - Int64Range: MessageTypeDefinition - Percent: MessageTypeDefinition - SemanticVersion: MessageTypeDefinition matcher: { - ListStringMatcher: MessageTypeDefinition - RegexMatchAndSubstitute: MessageTypeDefinition - RegexMatcher: MessageTypeDefinition - StringMatcher: MessageTypeDefinition + v3: { + ListStringMatcher: MessageTypeDefinition + RegexMatchAndSubstitute: MessageTypeDefinition + RegexMatcher: MessageTypeDefinition + StringMatcher: MessageTypeDefinition + } + } + v3: { + CodecClientType: EnumTypeDefinition + DoubleRange: MessageTypeDefinition + FractionalPercent: MessageTypeDefinition + Int32Range: MessageTypeDefinition + Int64Range: MessageTypeDefinition + Percent: MessageTypeDefinition + SemanticVersion: MessageTypeDefinition } } } google: { - api: { - CustomHttpPattern: MessageTypeDefinition - Http: MessageTypeDefinition - HttpRule: MessageTypeDefinition - } protobuf: { Any: MessageTypeDefinition BoolValue: MessageTypeDefinition @@ -125,6 +143,7 @@ export interface ProtoGrpcType { MigrateAnnotation: MessageTypeDefinition PackageVersionStatus: EnumTypeDefinition StatusAnnotation: MessageTypeDefinition + VersioningAnnotation: MessageTypeDefinition } } validate: { @@ -153,5 +172,22 @@ export interface ProtoGrpcType { UInt32Rules: MessageTypeDefinition UInt64Rules: MessageTypeDefinition } + xds: { + annotations: { + v3: { + FieldStatusAnnotation: MessageTypeDefinition + FileStatusAnnotation: MessageTypeDefinition + MessageStatusAnnotation: MessageTypeDefinition + PackageVersionStatus: EnumTypeDefinition + StatusAnnotation: MessageTypeDefinition + } + } + core: { + v3: { + Authority: MessageTypeDefinition + ContextParams: MessageTypeDefinition + } + } + } } diff --git a/packages/grpc-js-xds/src/generated/envoy/admin/v3/ClientResourceStatus.ts b/packages/grpc-js-xds/src/generated/envoy/admin/v3/ClientResourceStatus.ts new file mode 100644 index 000000000..b7a78a338 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/admin/v3/ClientResourceStatus.ts @@ -0,0 +1,77 @@ +// Original file: deps/envoy-api/envoy/admin/v3/config_dump_shared.proto + +/** + * Resource status from the view of a xDS client, which tells the synchronization + * status between the xDS client and the xDS server. + */ +export const ClientResourceStatus = { + /** + * Resource status is not available/unknown. + */ + UNKNOWN: 'UNKNOWN', + /** + * Client requested this resource but hasn't received any update from management + * server. 
The client will not fail requests, but will queue them until update + * arrives or the client times out waiting for the resource. + */ + REQUESTED: 'REQUESTED', + /** + * This resource has been requested by the client but has either not been + * delivered by the server or was previously delivered by the server and then + * subsequently removed from resources provided by the server. For more + * information, please refer to the :ref:`"Knowing When a Requested Resource + * Does Not Exist" ` section. + */ + DOES_NOT_EXIST: 'DOES_NOT_EXIST', + /** + * Client received this resource and replied with ACK. + */ + ACKED: 'ACKED', + /** + * Client received this resource and replied with NACK. + */ + NACKED: 'NACKED', +} as const; + +/** + * Resource status from the view of a xDS client, which tells the synchronization + * status between the xDS client and the xDS server. + */ +export type ClientResourceStatus = + /** + * Resource status is not available/unknown. + */ + | 'UNKNOWN' + | 0 + /** + * Client requested this resource but hasn't received any update from management + * server. The client will not fail requests, but will queue them until update + * arrives or the client times out waiting for the resource. + */ + | 'REQUESTED' + | 1 + /** + * This resource has been requested by the client but has either not been + * delivered by the server or was previously delivered by the server and then + * subsequently removed from resources provided by the server. For more + * information, please refer to the :ref:`"Knowing When a Requested Resource + * Does Not Exist" ` section. + */ + | 'DOES_NOT_EXIST' + | 2 + /** + * Client received this resource and replied with ACK. + */ + | 'ACKED' + | 3 + /** + * Client received this resource and replied with NACK. + */ + | 'NACKED' + | 4 + +/** + * Resource status from the view of a xDS client, which tells the synchronization + * status between the xDS client and the xDS server. + */ +export type ClientResourceStatus__Output = typeof ClientResourceStatus[keyof typeof ClientResourceStatus] diff --git a/packages/grpc-js-xds/src/generated/envoy/admin/v3/ClustersConfigDump.ts b/packages/grpc-js-xds/src/generated/envoy/admin/v3/ClustersConfigDump.ts new file mode 100644 index 000000000..2c3b4f8a5 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/admin/v3/ClustersConfigDump.ts @@ -0,0 +1,164 @@ +// Original file: deps/envoy-api/envoy/admin/v3/config_dump_shared.proto + +import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../google/protobuf/Any'; +import type { Timestamp as _google_protobuf_Timestamp, Timestamp__Output as _google_protobuf_Timestamp__Output } from '../../../google/protobuf/Timestamp'; +import type { UpdateFailureState as _envoy_admin_v3_UpdateFailureState, UpdateFailureState__Output as _envoy_admin_v3_UpdateFailureState__Output } from '../../../envoy/admin/v3/UpdateFailureState'; +import type { ClientResourceStatus as _envoy_admin_v3_ClientResourceStatus, ClientResourceStatus__Output as _envoy_admin_v3_ClientResourceStatus__Output } from '../../../envoy/admin/v3/ClientResourceStatus'; + +/** + * Describes a dynamically loaded cluster via the CDS API. + * [#next-free-field: 6] + */ +export interface _envoy_admin_v3_ClustersConfigDump_DynamicCluster { + /** + * This is the per-resource version information. This version is currently taken from the + * :ref:`version_info ` field at the time + * that the cluster was loaded. In the future, discrete per-cluster versions may be supported by + * the API. 
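The ClientStatusDiscoveryService added in csds.ts above, together with the ClientResourceStatus values just defined, lets a management server or a debugging tool ask an xDS client for its own view of each resource. A hedged sketch of the unary fetch path, assuming the csds package has already been loaded the same way as the ADS package in the earlier sketch and that the target address is illustrative:

import * as grpc from '@grpc/grpc-js';
import type { ProtoGrpcType as CsdsProtoGrpcType } from './generated/csds';

// Assumed to be produced by loadPackageDefinition, as in the earlier sketch.
declare const csdsProto: CsdsProtoGrpcType;

const csdsClient = new csdsProto.envoy.service.status.v3.ClientStatusDiscoveryService(
  'localhost:50051', // assumed CSDS endpoint
  grpc.credentials.createInsecure()
);

// FetchClientStatus is the unary variant; an empty node_matchers list requests all clients.
csdsClient.FetchClientStatus({ node_matchers: [] }, (error, response) => {
  if (error) {
    console.error('CSDS fetch failed:', error.message);
    return;
  }
  // Each ClientConfig entry describes one xDS client's per-resource status.
  console.log('clients reported:', response?.config?.length ?? 0);
});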
+ */ + 'version_info'?: (string); + /** + * The cluster config. + */ + 'cluster'?: (_google_protobuf_Any | null); + /** + * The timestamp when the Cluster was last updated. + */ + 'last_updated'?: (_google_protobuf_Timestamp | null); + /** + * Set if the last update failed, cleared after the next successful update. + * The ``error_state`` field contains the rejected version of this particular + * resource along with the reason and timestamp. For successfully updated or + * acknowledged resource, this field should be empty. + * [#not-implemented-hide:] + */ + 'error_state'?: (_envoy_admin_v3_UpdateFailureState | null); + /** + * The client status of this resource. + * [#not-implemented-hide:] + */ + 'client_status'?: (_envoy_admin_v3_ClientResourceStatus); +} + +/** + * Describes a dynamically loaded cluster via the CDS API. + * [#next-free-field: 6] + */ +export interface _envoy_admin_v3_ClustersConfigDump_DynamicCluster__Output { + /** + * This is the per-resource version information. This version is currently taken from the + * :ref:`version_info ` field at the time + * that the cluster was loaded. In the future, discrete per-cluster versions may be supported by + * the API. + */ + 'version_info': (string); + /** + * The cluster config. + */ + 'cluster': (_google_protobuf_Any__Output | null); + /** + * The timestamp when the Cluster was last updated. + */ + 'last_updated': (_google_protobuf_Timestamp__Output | null); + /** + * Set if the last update failed, cleared after the next successful update. + * The ``error_state`` field contains the rejected version of this particular + * resource along with the reason and timestamp. For successfully updated or + * acknowledged resource, this field should be empty. + * [#not-implemented-hide:] + */ + 'error_state': (_envoy_admin_v3_UpdateFailureState__Output | null); + /** + * The client status of this resource. + * [#not-implemented-hide:] + */ + 'client_status': (_envoy_admin_v3_ClientResourceStatus__Output); +} + +/** + * Describes a statically loaded cluster. + */ +export interface _envoy_admin_v3_ClustersConfigDump_StaticCluster { + /** + * The cluster config. + */ + 'cluster'?: (_google_protobuf_Any | null); + /** + * The timestamp when the Cluster was last updated. + */ + 'last_updated'?: (_google_protobuf_Timestamp | null); +} + +/** + * Describes a statically loaded cluster. + */ +export interface _envoy_admin_v3_ClustersConfigDump_StaticCluster__Output { + /** + * The cluster config. + */ + 'cluster': (_google_protobuf_Any__Output | null); + /** + * The timestamp when the Cluster was last updated. + */ + 'last_updated': (_google_protobuf_Timestamp__Output | null); +} + +/** + * Envoy's cluster manager fills this message with all currently known clusters. Cluster + * configuration information can be used to recreate an Envoy configuration by populating all + * clusters as static clusters or by returning them in a CDS response. + */ +export interface ClustersConfigDump { + /** + * This is the :ref:`version_info ` in the + * last processed CDS discovery response. If there are only static bootstrap clusters, this field + * will be "". + */ + 'version_info'?: (string); + /** + * The statically loaded cluster configs. + */ + 'static_clusters'?: (_envoy_admin_v3_ClustersConfigDump_StaticCluster)[]; + /** + * The dynamically loaded active clusters. These are clusters that are available to service + * data plane traffic. 
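ClustersConfigDump ties together ClientResourceStatus and UpdateFailureState: a NACKed dynamic cluster carries the rejected version and the rejection reason. A small sketch of surfacing those, using the generated types from this diff; the helper name is hypothetical:

import type { ClustersConfigDump__Output } from './generated/envoy/admin/v3/ClustersConfigDump';

// Hypothetical helper: summarize dynamic clusters that the client rejected (NACKed).
export function summarizeNackedClusters(dump: ClustersConfigDump__Output): string[] {
  const summaries: string[] = [];
  for (const dynamicCluster of dump.dynamic_active_clusters) {
    if (dynamicCluster.client_status === 'NACKED') {
      // error_state carries the rejected version and the reason for rejection.
      const details = dynamicCluster.error_state?.details ?? 'no details recorded';
      summaries.push(`cluster config version ${dynamicCluster.version_info}: ${details}`);
    }
  }
  return summaries;
}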
+ */ + 'dynamic_active_clusters'?: (_envoy_admin_v3_ClustersConfigDump_DynamicCluster)[]; + /** + * The dynamically loaded warming clusters. These are clusters that are currently undergoing + * warming in preparation to service data plane traffic. Note that if attempting to recreate an + * Envoy configuration from a configuration dump, the warming clusters should generally be + * discarded. + */ + 'dynamic_warming_clusters'?: (_envoy_admin_v3_ClustersConfigDump_DynamicCluster)[]; +} + +/** + * Envoy's cluster manager fills this message with all currently known clusters. Cluster + * configuration information can be used to recreate an Envoy configuration by populating all + * clusters as static clusters or by returning them in a CDS response. + */ +export interface ClustersConfigDump__Output { + /** + * This is the :ref:`version_info ` in the + * last processed CDS discovery response. If there are only static bootstrap clusters, this field + * will be "". + */ + 'version_info': (string); + /** + * The statically loaded cluster configs. + */ + 'static_clusters': (_envoy_admin_v3_ClustersConfigDump_StaticCluster__Output)[]; + /** + * The dynamically loaded active clusters. These are clusters that are available to service + * data plane traffic. + */ + 'dynamic_active_clusters': (_envoy_admin_v3_ClustersConfigDump_DynamicCluster__Output)[]; + /** + * The dynamically loaded warming clusters. These are clusters that are currently undergoing + * warming in preparation to service data plane traffic. Note that if attempting to recreate an + * Envoy configuration from a configuration dump, the warming clusters should generally be + * discarded. + */ + 'dynamic_warming_clusters': (_envoy_admin_v3_ClustersConfigDump_DynamicCluster__Output)[]; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/admin/v3/EcdsConfigDump.ts b/packages/grpc-js-xds/src/generated/envoy/admin/v3/EcdsConfigDump.ts new file mode 100644 index 000000000..70562962b --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/admin/v3/EcdsConfigDump.ts @@ -0,0 +1,100 @@ +// Original file: deps/envoy-api/envoy/admin/v3/config_dump_shared.proto + +import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../google/protobuf/Any'; +import type { Timestamp as _google_protobuf_Timestamp, Timestamp__Output as _google_protobuf_Timestamp__Output } from '../../../google/protobuf/Timestamp'; +import type { UpdateFailureState as _envoy_admin_v3_UpdateFailureState, UpdateFailureState__Output as _envoy_admin_v3_UpdateFailureState__Output } from '../../../envoy/admin/v3/UpdateFailureState'; +import type { ClientResourceStatus as _envoy_admin_v3_ClientResourceStatus, ClientResourceStatus__Output as _envoy_admin_v3_ClientResourceStatus__Output } from '../../../envoy/admin/v3/ClientResourceStatus'; + +/** + * [#next-free-field: 6] + */ +export interface _envoy_admin_v3_EcdsConfigDump_EcdsFilterConfig { + /** + * This is the per-resource version information. This version is currently + * taken from the :ref:`version_info + * ` + * field at the time that the ECDS filter was loaded. + */ + 'version_info'?: (string); + /** + * The ECDS filter config. + */ + 'ecds_filter'?: (_google_protobuf_Any | null); + /** + * The timestamp when the ECDS filter was last updated. + */ + 'last_updated'?: (_google_protobuf_Timestamp | null); + /** + * Set if the last update failed, cleared after the next successful update. 
+ * The ``error_state`` field contains the rejected version of this + * particular resource along with the reason and timestamp. For successfully + * updated or acknowledged resource, this field should be empty. + * [#not-implemented-hide:] + */ + 'error_state'?: (_envoy_admin_v3_UpdateFailureState | null); + /** + * The client status of this resource. + * [#not-implemented-hide:] + */ + 'client_status'?: (_envoy_admin_v3_ClientResourceStatus); +} + +/** + * [#next-free-field: 6] + */ +export interface _envoy_admin_v3_EcdsConfigDump_EcdsFilterConfig__Output { + /** + * This is the per-resource version information. This version is currently + * taken from the :ref:`version_info + * ` + * field at the time that the ECDS filter was loaded. + */ + 'version_info': (string); + /** + * The ECDS filter config. + */ + 'ecds_filter': (_google_protobuf_Any__Output | null); + /** + * The timestamp when the ECDS filter was last updated. + */ + 'last_updated': (_google_protobuf_Timestamp__Output | null); + /** + * Set if the last update failed, cleared after the next successful update. + * The ``error_state`` field contains the rejected version of this + * particular resource along with the reason and timestamp. For successfully + * updated or acknowledged resource, this field should be empty. + * [#not-implemented-hide:] + */ + 'error_state': (_envoy_admin_v3_UpdateFailureState__Output | null); + /** + * The client status of this resource. + * [#not-implemented-hide:] + */ + 'client_status': (_envoy_admin_v3_ClientResourceStatus__Output); +} + +/** + * Envoy's ECDS service fills this message with all currently extension + * configuration. Extension configuration information can be used to recreate + * an Envoy ECDS listener and HTTP filters as static filters or by returning + * them in ECDS response. + */ +export interface EcdsConfigDump { + /** + * The ECDS filter configs. + */ + 'ecds_filters'?: (_envoy_admin_v3_EcdsConfigDump_EcdsFilterConfig)[]; +} + +/** + * Envoy's ECDS service fills this message with all currently extension + * configuration. Extension configuration information can be used to recreate + * an Envoy ECDS listener and HTTP filters as static filters or by returning + * them in ECDS response. + */ +export interface EcdsConfigDump__Output { + /** + * The ECDS filter configs. 
+ */ + 'ecds_filters': (_envoy_admin_v3_EcdsConfigDump_EcdsFilterConfig__Output)[]; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/admin/v3/EndpointsConfigDump.ts b/packages/grpc-js-xds/src/generated/envoy/admin/v3/EndpointsConfigDump.ts new file mode 100644 index 000000000..3f362c5c3 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/admin/v3/EndpointsConfigDump.ts @@ -0,0 +1,126 @@ +// Original file: deps/envoy-api/envoy/admin/v3/config_dump_shared.proto + +import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../google/protobuf/Any'; +import type { Timestamp as _google_protobuf_Timestamp, Timestamp__Output as _google_protobuf_Timestamp__Output } from '../../../google/protobuf/Timestamp'; +import type { UpdateFailureState as _envoy_admin_v3_UpdateFailureState, UpdateFailureState__Output as _envoy_admin_v3_UpdateFailureState__Output } from '../../../envoy/admin/v3/UpdateFailureState'; +import type { ClientResourceStatus as _envoy_admin_v3_ClientResourceStatus, ClientResourceStatus__Output as _envoy_admin_v3_ClientResourceStatus__Output } from '../../../envoy/admin/v3/ClientResourceStatus'; + +/** + * [#next-free-field: 6] + */ +export interface _envoy_admin_v3_EndpointsConfigDump_DynamicEndpointConfig { + /** + * [#not-implemented-hide:] This is the per-resource version information. This version is currently taken from the + * :ref:`version_info ` field at the time that + * the endpoint configuration was loaded. + */ + 'version_info'?: (string); + /** + * The endpoint config. + */ + 'endpoint_config'?: (_google_protobuf_Any | null); + /** + * [#not-implemented-hide:] The timestamp when the Endpoint was last updated. + */ + 'last_updated'?: (_google_protobuf_Timestamp | null); + /** + * Set if the last update failed, cleared after the next successful update. + * The ``error_state`` field contains the rejected version of this particular + * resource along with the reason and timestamp. For successfully updated or + * acknowledged resource, this field should be empty. + * [#not-implemented-hide:] + */ + 'error_state'?: (_envoy_admin_v3_UpdateFailureState | null); + /** + * The client status of this resource. + * [#not-implemented-hide:] + */ + 'client_status'?: (_envoy_admin_v3_ClientResourceStatus); +} + +/** + * [#next-free-field: 6] + */ +export interface _envoy_admin_v3_EndpointsConfigDump_DynamicEndpointConfig__Output { + /** + * [#not-implemented-hide:] This is the per-resource version information. This version is currently taken from the + * :ref:`version_info ` field at the time that + * the endpoint configuration was loaded. + */ + 'version_info': (string); + /** + * The endpoint config. + */ + 'endpoint_config': (_google_protobuf_Any__Output | null); + /** + * [#not-implemented-hide:] The timestamp when the Endpoint was last updated. + */ + 'last_updated': (_google_protobuf_Timestamp__Output | null); + /** + * Set if the last update failed, cleared after the next successful update. + * The ``error_state`` field contains the rejected version of this particular + * resource along with the reason and timestamp. For successfully updated or + * acknowledged resource, this field should be empty. + * [#not-implemented-hide:] + */ + 'error_state': (_envoy_admin_v3_UpdateFailureState__Output | null); + /** + * The client status of this resource. 
+ * [#not-implemented-hide:] + */ + 'client_status': (_envoy_admin_v3_ClientResourceStatus__Output); +} + +export interface _envoy_admin_v3_EndpointsConfigDump_StaticEndpointConfig { + /** + * The endpoint config. + */ + 'endpoint_config'?: (_google_protobuf_Any | null); + /** + * [#not-implemented-hide:] The timestamp when the Endpoint was last updated. + */ + 'last_updated'?: (_google_protobuf_Timestamp | null); +} + +export interface _envoy_admin_v3_EndpointsConfigDump_StaticEndpointConfig__Output { + /** + * The endpoint config. + */ + 'endpoint_config': (_google_protobuf_Any__Output | null); + /** + * [#not-implemented-hide:] The timestamp when the Endpoint was last updated. + */ + 'last_updated': (_google_protobuf_Timestamp__Output | null); +} + +/** + * Envoy's admin fill this message with all currently known endpoints. Endpoint + * configuration information can be used to recreate an Envoy configuration by populating all + * endpoints as static endpoints or by returning them in an EDS response. + */ +export interface EndpointsConfigDump { + /** + * The statically loaded endpoint configs. + */ + 'static_endpoint_configs'?: (_envoy_admin_v3_EndpointsConfigDump_StaticEndpointConfig)[]; + /** + * The dynamically loaded endpoint configs. + */ + 'dynamic_endpoint_configs'?: (_envoy_admin_v3_EndpointsConfigDump_DynamicEndpointConfig)[]; +} + +/** + * Envoy's admin fill this message with all currently known endpoints. Endpoint + * configuration information can be used to recreate an Envoy configuration by populating all + * endpoints as static endpoints or by returning them in an EDS response. + */ +export interface EndpointsConfigDump__Output { + /** + * The statically loaded endpoint configs. + */ + 'static_endpoint_configs': (_envoy_admin_v3_EndpointsConfigDump_StaticEndpointConfig__Output)[]; + /** + * The dynamically loaded endpoint configs. + */ + 'dynamic_endpoint_configs': (_envoy_admin_v3_EndpointsConfigDump_DynamicEndpointConfig__Output)[]; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/admin/v3/ListenersConfigDump.ts b/packages/grpc-js-xds/src/generated/envoy/admin/v3/ListenersConfigDump.ts new file mode 100644 index 000000000..a90338fdf --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/admin/v3/ListenersConfigDump.ts @@ -0,0 +1,198 @@ +// Original file: deps/envoy-api/envoy/admin/v3/config_dump_shared.proto + +import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../google/protobuf/Any'; +import type { Timestamp as _google_protobuf_Timestamp, Timestamp__Output as _google_protobuf_Timestamp__Output } from '../../../google/protobuf/Timestamp'; +import type { UpdateFailureState as _envoy_admin_v3_UpdateFailureState, UpdateFailureState__Output as _envoy_admin_v3_UpdateFailureState__Output } from '../../../envoy/admin/v3/UpdateFailureState'; +import type { ClientResourceStatus as _envoy_admin_v3_ClientResourceStatus, ClientResourceStatus__Output as _envoy_admin_v3_ClientResourceStatus__Output } from '../../../envoy/admin/v3/ClientResourceStatus'; + +/** + * Describes a dynamically loaded listener via the LDS API. + * [#next-free-field: 7] + */ +export interface _envoy_admin_v3_ListenersConfigDump_DynamicListener { + /** + * The name or unique id of this listener, pulled from the DynamicListenerState config. + */ + 'name'?: (string); + /** + * The listener state for any active listener by this name. + * These are listeners that are available to service data plane traffic. 
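These config dump messages all wrap their resources in google.protobuf.Any, so consumers generally key off type_url before attempting to decode the value bytes. A tiny illustrative check, assuming the standard type URL for v3 clusters:

import type { Any__Output } from './generated/google/protobuf/Any';

const CLUSTER_TYPE_URL = 'type.googleapis.com/envoy.config.cluster.v3.Cluster';

// True if the wrapped resource claims to be a v3 Cluster; decoding `value`
// still requires the corresponding message type.
export function isClusterResource(resource: Any__Output | null): boolean {
  return resource !== null && resource.type_url === CLUSTER_TYPE_URL;
}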
+ */ + 'active_state'?: (_envoy_admin_v3_ListenersConfigDump_DynamicListenerState | null); + /** + * The listener state for any warming listener by this name. + * These are listeners that are currently undergoing warming in preparation to service data + * plane traffic. Note that if attempting to recreate an Envoy configuration from a + * configuration dump, the warming listeners should generally be discarded. + */ + 'warming_state'?: (_envoy_admin_v3_ListenersConfigDump_DynamicListenerState | null); + /** + * The listener state for any draining listener by this name. + * These are listeners that are currently undergoing draining in preparation to stop servicing + * data plane traffic. Note that if attempting to recreate an Envoy configuration from a + * configuration dump, the draining listeners should generally be discarded. + */ + 'draining_state'?: (_envoy_admin_v3_ListenersConfigDump_DynamicListenerState | null); + /** + * Set if the last update failed, cleared after the next successful update. + * The ``error_state`` field contains the rejected version of this particular + * resource along with the reason and timestamp. For successfully updated or + * acknowledged resource, this field should be empty. + */ + 'error_state'?: (_envoy_admin_v3_UpdateFailureState | null); + /** + * The client status of this resource. + * [#not-implemented-hide:] + */ + 'client_status'?: (_envoy_admin_v3_ClientResourceStatus); +} + +/** + * Describes a dynamically loaded listener via the LDS API. + * [#next-free-field: 7] + */ +export interface _envoy_admin_v3_ListenersConfigDump_DynamicListener__Output { + /** + * The name or unique id of this listener, pulled from the DynamicListenerState config. + */ + 'name': (string); + /** + * The listener state for any active listener by this name. + * These are listeners that are available to service data plane traffic. + */ + 'active_state': (_envoy_admin_v3_ListenersConfigDump_DynamicListenerState__Output | null); + /** + * The listener state for any warming listener by this name. + * These are listeners that are currently undergoing warming in preparation to service data + * plane traffic. Note that if attempting to recreate an Envoy configuration from a + * configuration dump, the warming listeners should generally be discarded. + */ + 'warming_state': (_envoy_admin_v3_ListenersConfigDump_DynamicListenerState__Output | null); + /** + * The listener state for any draining listener by this name. + * These are listeners that are currently undergoing draining in preparation to stop servicing + * data plane traffic. Note that if attempting to recreate an Envoy configuration from a + * configuration dump, the draining listeners should generally be discarded. + */ + 'draining_state': (_envoy_admin_v3_ListenersConfigDump_DynamicListenerState__Output | null); + /** + * Set if the last update failed, cleared after the next successful update. + * The ``error_state`` field contains the rejected version of this particular + * resource along with the reason and timestamp. For successfully updated or + * acknowledged resource, this field should be empty. + */ + 'error_state': (_envoy_admin_v3_UpdateFailureState__Output | null); + /** + * The client status of this resource. + * [#not-implemented-hide:] + */ + 'client_status': (_envoy_admin_v3_ClientResourceStatus__Output); +} + +export interface _envoy_admin_v3_ListenersConfigDump_DynamicListenerState { + /** + * This is the per-resource version information. 
This version is currently taken from the + * :ref:`version_info ` field at the time + * that the listener was loaded. In the future, discrete per-listener versions may be supported + * by the API. + */ + 'version_info'?: (string); + /** + * The listener config. + */ + 'listener'?: (_google_protobuf_Any | null); + /** + * The timestamp when the Listener was last successfully updated. + */ + 'last_updated'?: (_google_protobuf_Timestamp | null); +} + +export interface _envoy_admin_v3_ListenersConfigDump_DynamicListenerState__Output { + /** + * This is the per-resource version information. This version is currently taken from the + * :ref:`version_info ` field at the time + * that the listener was loaded. In the future, discrete per-listener versions may be supported + * by the API. + */ + 'version_info': (string); + /** + * The listener config. + */ + 'listener': (_google_protobuf_Any__Output | null); + /** + * The timestamp when the Listener was last successfully updated. + */ + 'last_updated': (_google_protobuf_Timestamp__Output | null); +} + +/** + * Describes a statically loaded listener. + */ +export interface _envoy_admin_v3_ListenersConfigDump_StaticListener { + /** + * The listener config. + */ + 'listener'?: (_google_protobuf_Any | null); + /** + * The timestamp when the Listener was last successfully updated. + */ + 'last_updated'?: (_google_protobuf_Timestamp | null); +} + +/** + * Describes a statically loaded listener. + */ +export interface _envoy_admin_v3_ListenersConfigDump_StaticListener__Output { + /** + * The listener config. + */ + 'listener': (_google_protobuf_Any__Output | null); + /** + * The timestamp when the Listener was last successfully updated. + */ + 'last_updated': (_google_protobuf_Timestamp__Output | null); +} + +/** + * Envoy's listener manager fills this message with all currently known listeners. Listener + * configuration information can be used to recreate an Envoy configuration by populating all + * listeners as static listeners or by returning them in a LDS response. + */ +export interface ListenersConfigDump { + /** + * This is the :ref:`version_info ` in the + * last processed LDS discovery response. If there are only static bootstrap listeners, this field + * will be "". + */ + 'version_info'?: (string); + /** + * The statically loaded listener configs. + */ + 'static_listeners'?: (_envoy_admin_v3_ListenersConfigDump_StaticListener)[]; + /** + * State for any warming, active, or draining listeners. + */ + 'dynamic_listeners'?: (_envoy_admin_v3_ListenersConfigDump_DynamicListener)[]; +} + +/** + * Envoy's listener manager fills this message with all currently known listeners. Listener + * configuration information can be used to recreate an Envoy configuration by populating all + * listeners as static listeners or by returning them in a LDS response. + */ +export interface ListenersConfigDump__Output { + /** + * This is the :ref:`version_info ` in the + * last processed LDS discovery response. If there are only static bootstrap listeners, this field + * will be "". + */ + 'version_info': (string); + /** + * The statically loaded listener configs. + */ + 'static_listeners': (_envoy_admin_v3_ListenersConfigDump_StaticListener__Output)[]; + /** + * State for any warming, active, or draining listeners. 
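As the comments above note, warming and draining listener states should generally be discarded when reconstructing a configuration from a dump. A hedged sketch of selecting only the active states; the helper is hypothetical:

import type {
  ListenersConfigDump__Output,
  _envoy_admin_v3_ListenersConfigDump_DynamicListenerState__Output as DynamicListenerState,
} from './generated/envoy/admin/v3/ListenersConfigDump';

// Hypothetical helper: keep only the state of listeners that are actively serving traffic.
export function activeListenerStates(dump: ListenersConfigDump__Output): DynamicListenerState[] {
  const states: DynamicListenerState[] = [];
  for (const listener of dump.dynamic_listeners) {
    if (listener.active_state !== null) {
      states.push(listener.active_state);
    }
  }
  return states;
}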
+ */ + 'dynamic_listeners': (_envoy_admin_v3_ListenersConfigDump_DynamicListener__Output)[]; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/admin/v3/RoutesConfigDump.ts b/packages/grpc-js-xds/src/generated/envoy/admin/v3/RoutesConfigDump.ts new file mode 100644 index 000000000..6de43f0eb --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/admin/v3/RoutesConfigDump.ts @@ -0,0 +1,130 @@ +// Original file: deps/envoy-api/envoy/admin/v3/config_dump_shared.proto + +import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../google/protobuf/Any'; +import type { Timestamp as _google_protobuf_Timestamp, Timestamp__Output as _google_protobuf_Timestamp__Output } from '../../../google/protobuf/Timestamp'; +import type { UpdateFailureState as _envoy_admin_v3_UpdateFailureState, UpdateFailureState__Output as _envoy_admin_v3_UpdateFailureState__Output } from '../../../envoy/admin/v3/UpdateFailureState'; +import type { ClientResourceStatus as _envoy_admin_v3_ClientResourceStatus, ClientResourceStatus__Output as _envoy_admin_v3_ClientResourceStatus__Output } from '../../../envoy/admin/v3/ClientResourceStatus'; + +/** + * [#next-free-field: 6] + */ +export interface _envoy_admin_v3_RoutesConfigDump_DynamicRouteConfig { + /** + * This is the per-resource version information. This version is currently taken from the + * :ref:`version_info ` field at the time that + * the route configuration was loaded. + */ + 'version_info'?: (string); + /** + * The route config. + */ + 'route_config'?: (_google_protobuf_Any | null); + /** + * The timestamp when the Route was last updated. + */ + 'last_updated'?: (_google_protobuf_Timestamp | null); + /** + * Set if the last update failed, cleared after the next successful update. + * The ``error_state`` field contains the rejected version of this particular + * resource along with the reason and timestamp. For successfully updated or + * acknowledged resource, this field should be empty. + * [#not-implemented-hide:] + */ + 'error_state'?: (_envoy_admin_v3_UpdateFailureState | null); + /** + * The client status of this resource. + * [#not-implemented-hide:] + */ + 'client_status'?: (_envoy_admin_v3_ClientResourceStatus); +} + +/** + * [#next-free-field: 6] + */ +export interface _envoy_admin_v3_RoutesConfigDump_DynamicRouteConfig__Output { + /** + * This is the per-resource version information. This version is currently taken from the + * :ref:`version_info ` field at the time that + * the route configuration was loaded. + */ + 'version_info': (string); + /** + * The route config. + */ + 'route_config': (_google_protobuf_Any__Output | null); + /** + * The timestamp when the Route was last updated. + */ + 'last_updated': (_google_protobuf_Timestamp__Output | null); + /** + * Set if the last update failed, cleared after the next successful update. + * The ``error_state`` field contains the rejected version of this particular + * resource along with the reason and timestamp. For successfully updated or + * acknowledged resource, this field should be empty. + * [#not-implemented-hide:] + */ + 'error_state': (_envoy_admin_v3_UpdateFailureState__Output | null); + /** + * The client status of this resource. + * [#not-implemented-hide:] + */ + 'client_status': (_envoy_admin_v3_ClientResourceStatus__Output); +} + +export interface _envoy_admin_v3_RoutesConfigDump_StaticRouteConfig { + /** + * The route config. + */ + 'route_config'?: (_google_protobuf_Any | null); + /** + * The timestamp when the Route was last updated. 
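A note on the paired interfaces that recur throughout these generated files: the plain interface is the permissive shape used when constructing a message (every field optional), while the __Output variant describes a deserialized message, where these files mark repeated and scalar fields as always present and message fields as object-or-null. A brief illustration with RoutesConfigDump:

import type {
  RoutesConfigDump,
  RoutesConfigDump__Output,
} from './generated/envoy/admin/v3/RoutesConfigDump';

// Constructing a message: omitted fields are allowed.
const partialDump: RoutesConfigDump = { static_route_configs: [] };
console.log('fields set:', Object.keys(partialDump).length);

// Reading a deserialized message: array fields are typed as always present,
// so they can be used without optional chaining.
export function countRouteConfigs(dump: RoutesConfigDump__Output): number {
  return dump.static_route_configs.length + dump.dynamic_route_configs.length;
}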
+ */ + 'last_updated'?: (_google_protobuf_Timestamp | null); +} + +export interface _envoy_admin_v3_RoutesConfigDump_StaticRouteConfig__Output { + /** + * The route config. + */ + 'route_config': (_google_protobuf_Any__Output | null); + /** + * The timestamp when the Route was last updated. + */ + 'last_updated': (_google_protobuf_Timestamp__Output | null); +} + +/** + * Envoy's RDS implementation fills this message with all currently loaded routes, as described by + * their RouteConfiguration objects. Static routes that are either defined in the bootstrap configuration + * or defined inline while configuring listeners are separated from those configured dynamically via RDS. + * Route configuration information can be used to recreate an Envoy configuration by populating all routes + * as static routes or by returning them in RDS responses. + */ +export interface RoutesConfigDump { + /** + * The statically loaded route configs. + */ + 'static_route_configs'?: (_envoy_admin_v3_RoutesConfigDump_StaticRouteConfig)[]; + /** + * The dynamically loaded route configs. + */ + 'dynamic_route_configs'?: (_envoy_admin_v3_RoutesConfigDump_DynamicRouteConfig)[]; +} + +/** + * Envoy's RDS implementation fills this message with all currently loaded routes, as described by + * their RouteConfiguration objects. Static routes that are either defined in the bootstrap configuration + * or defined inline while configuring listeners are separated from those configured dynamically via RDS. + * Route configuration information can be used to recreate an Envoy configuration by populating all routes + * as static routes or by returning them in RDS responses. + */ +export interface RoutesConfigDump__Output { + /** + * The statically loaded route configs. + */ + 'static_route_configs': (_envoy_admin_v3_RoutesConfigDump_StaticRouteConfig__Output)[]; + /** + * The dynamically loaded route configs. + */ + 'dynamic_route_configs': (_envoy_admin_v3_RoutesConfigDump_DynamicRouteConfig__Output)[]; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/admin/v3/ScopedRoutesConfigDump.ts b/packages/grpc-js-xds/src/generated/envoy/admin/v3/ScopedRoutesConfigDump.ts new file mode 100644 index 000000000..1ce3934cc --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/admin/v3/ScopedRoutesConfigDump.ts @@ -0,0 +1,144 @@ +// Original file: deps/envoy-api/envoy/admin/v3/config_dump_shared.proto + +import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../google/protobuf/Any'; +import type { Timestamp as _google_protobuf_Timestamp, Timestamp__Output as _google_protobuf_Timestamp__Output } from '../../../google/protobuf/Timestamp'; +import type { UpdateFailureState as _envoy_admin_v3_UpdateFailureState, UpdateFailureState__Output as _envoy_admin_v3_UpdateFailureState__Output } from '../../../envoy/admin/v3/UpdateFailureState'; +import type { ClientResourceStatus as _envoy_admin_v3_ClientResourceStatus, ClientResourceStatus__Output as _envoy_admin_v3_ClientResourceStatus__Output } from '../../../envoy/admin/v3/ClientResourceStatus'; + +/** + * [#next-free-field: 7] + */ +export interface _envoy_admin_v3_ScopedRoutesConfigDump_DynamicScopedRouteConfigs { + /** + * The name assigned to the scoped route configurations. + */ + 'name'?: (string); + /** + * This is the per-resource version information. This version is currently taken from the + * :ref:`version_info ` field at the time that + * the scoped routes configuration was loaded. 
+ */ + 'version_info'?: (string); + /** + * The scoped route configurations. + */ + 'scoped_route_configs'?: (_google_protobuf_Any)[]; + /** + * The timestamp when the scoped route config set was last updated. + */ + 'last_updated'?: (_google_protobuf_Timestamp | null); + /** + * Set if the last update failed, cleared after the next successful update. + * The ``error_state`` field contains the rejected version of this particular + * resource along with the reason and timestamp. For successfully updated or + * acknowledged resource, this field should be empty. + * [#not-implemented-hide:] + */ + 'error_state'?: (_envoy_admin_v3_UpdateFailureState | null); + /** + * The client status of this resource. + * [#not-implemented-hide:] + */ + 'client_status'?: (_envoy_admin_v3_ClientResourceStatus); +} + +/** + * [#next-free-field: 7] + */ +export interface _envoy_admin_v3_ScopedRoutesConfigDump_DynamicScopedRouteConfigs__Output { + /** + * The name assigned to the scoped route configurations. + */ + 'name': (string); + /** + * This is the per-resource version information. This version is currently taken from the + * :ref:`version_info ` field at the time that + * the scoped routes configuration was loaded. + */ + 'version_info': (string); + /** + * The scoped route configurations. + */ + 'scoped_route_configs': (_google_protobuf_Any__Output)[]; + /** + * The timestamp when the scoped route config set was last updated. + */ + 'last_updated': (_google_protobuf_Timestamp__Output | null); + /** + * Set if the last update failed, cleared after the next successful update. + * The ``error_state`` field contains the rejected version of this particular + * resource along with the reason and timestamp. For successfully updated or + * acknowledged resource, this field should be empty. + * [#not-implemented-hide:] + */ + 'error_state': (_envoy_admin_v3_UpdateFailureState__Output | null); + /** + * The client status of this resource. + * [#not-implemented-hide:] + */ + 'client_status': (_envoy_admin_v3_ClientResourceStatus__Output); +} + +export interface _envoy_admin_v3_ScopedRoutesConfigDump_InlineScopedRouteConfigs { + /** + * The name assigned to the scoped route configurations. + */ + 'name'?: (string); + /** + * The scoped route configurations. + */ + 'scoped_route_configs'?: (_google_protobuf_Any)[]; + /** + * The timestamp when the scoped route config set was last updated. + */ + 'last_updated'?: (_google_protobuf_Timestamp | null); +} + +export interface _envoy_admin_v3_ScopedRoutesConfigDump_InlineScopedRouteConfigs__Output { + /** + * The name assigned to the scoped route configurations. + */ + 'name': (string); + /** + * The scoped route configurations. + */ + 'scoped_route_configs': (_google_protobuf_Any__Output)[]; + /** + * The timestamp when the scoped route config set was last updated. + */ + 'last_updated': (_google_protobuf_Timestamp__Output | null); +} + +/** + * Envoy's scoped RDS implementation fills this message with all currently loaded route + * configuration scopes (defined via ScopedRouteConfigurationsSet protos). This message lists both + * the scopes defined inline with the higher order object (i.e., the HttpConnectionManager) and the + * dynamically obtained scopes via the SRDS API. + */ +export interface ScopedRoutesConfigDump { + /** + * The statically loaded scoped route configs. + */ + 'inline_scoped_route_configs'?: (_envoy_admin_v3_ScopedRoutesConfigDump_InlineScopedRouteConfigs)[]; + /** + * The dynamically loaded scoped route configs. 
+ */ + 'dynamic_scoped_route_configs'?: (_envoy_admin_v3_ScopedRoutesConfigDump_DynamicScopedRouteConfigs)[]; +} + +/** + * Envoy's scoped RDS implementation fills this message with all currently loaded route + * configuration scopes (defined via ScopedRouteConfigurationsSet protos). This message lists both + * the scopes defined inline with the higher order object (i.e., the HttpConnectionManager) and the + * dynamically obtained scopes via the SRDS API. + */ +export interface ScopedRoutesConfigDump__Output { + /** + * The statically loaded scoped route configs. + */ + 'inline_scoped_route_configs': (_envoy_admin_v3_ScopedRoutesConfigDump_InlineScopedRouteConfigs__Output)[]; + /** + * The dynamically loaded scoped route configs. + */ + 'dynamic_scoped_route_configs': (_envoy_admin_v3_ScopedRoutesConfigDump_DynamicScopedRouteConfigs__Output)[]; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/admin/v3/UpdateFailureState.ts b/packages/grpc-js-xds/src/generated/envoy/admin/v3/UpdateFailureState.ts new file mode 100644 index 000000000..100c65a1b --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/admin/v3/UpdateFailureState.ts @@ -0,0 +1,46 @@ +// Original file: deps/envoy-api/envoy/admin/v3/config_dump_shared.proto + +import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../google/protobuf/Any'; +import type { Timestamp as _google_protobuf_Timestamp, Timestamp__Output as _google_protobuf_Timestamp__Output } from '../../../google/protobuf/Timestamp'; + +export interface UpdateFailureState { + /** + * What the component configuration would have been if the update had succeeded. + * This field may not be populated by xDS clients due to storage overhead. + */ + 'failed_configuration'?: (_google_protobuf_Any | null); + /** + * Time of the latest failed update attempt. + */ + 'last_update_attempt'?: (_google_protobuf_Timestamp | null); + /** + * Details about the last failed update attempt. + */ + 'details'?: (string); + /** + * This is the version of the rejected resource. + * [#not-implemented-hide:] + */ + 'version_info'?: (string); +} + +export interface UpdateFailureState__Output { + /** + * What the component configuration would have been if the update had succeeded. + * This field may not be populated by xDS clients due to storage overhead. + */ + 'failed_configuration': (_google_protobuf_Any__Output | null); + /** + * Time of the latest failed update attempt. + */ + 'last_update_attempt': (_google_protobuf_Timestamp__Output | null); + /** + * Details about the last failed update attempt. + */ + 'details': (string); + /** + * This is the version of the rejected resource. 
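UpdateFailureState is the message that carries the reason whenever the dumps above report a rejected update. A hedged, purely illustrative formatter over its __Output form, which is defined just below:

import type { UpdateFailureState__Output } from './generated/envoy/admin/v3/UpdateFailureState';

// Hypothetical formatter for the error_state field used by the config dumps.
export function formatUpdateFailure(state: UpdateFailureState__Output): string {
  // `details` is the human-readable rejection reason; `version_info` is the rejected version.
  return `rejected version "${state.version_info}": ${state.details}`;
}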
+ * [#not-implemented-hide:] + */ + 'version_info': (string); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/Cluster.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/Cluster.ts deleted file mode 100644 index f077d2edc..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/Cluster.ts +++ /dev/null @@ -1,1679 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/cluster.proto - -import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../google/protobuf/Duration'; -import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../google/protobuf/UInt32Value'; -import type { Address as _envoy_api_v2_core_Address, Address__Output as _envoy_api_v2_core_Address__Output } from '../../../envoy/api/v2/core/Address'; -import type { HealthCheck as _envoy_api_v2_core_HealthCheck, HealthCheck__Output as _envoy_api_v2_core_HealthCheck__Output } from '../../../envoy/api/v2/core/HealthCheck'; -import type { CircuitBreakers as _envoy_api_v2_cluster_CircuitBreakers, CircuitBreakers__Output as _envoy_api_v2_cluster_CircuitBreakers__Output } from '../../../envoy/api/v2/cluster/CircuitBreakers'; -import type { UpstreamTlsContext as _envoy_api_v2_auth_UpstreamTlsContext, UpstreamTlsContext__Output as _envoy_api_v2_auth_UpstreamTlsContext__Output } from '../../../envoy/api/v2/auth/UpstreamTlsContext'; -import type { Http1ProtocolOptions as _envoy_api_v2_core_Http1ProtocolOptions, Http1ProtocolOptions__Output as _envoy_api_v2_core_Http1ProtocolOptions__Output } from '../../../envoy/api/v2/core/Http1ProtocolOptions'; -import type { Http2ProtocolOptions as _envoy_api_v2_core_Http2ProtocolOptions, Http2ProtocolOptions__Output as _envoy_api_v2_core_Http2ProtocolOptions__Output } from '../../../envoy/api/v2/core/Http2ProtocolOptions'; -import type { OutlierDetection as _envoy_api_v2_cluster_OutlierDetection, OutlierDetection__Output as _envoy_api_v2_cluster_OutlierDetection__Output } from '../../../envoy/api/v2/cluster/OutlierDetection'; -import type { BindConfig as _envoy_api_v2_core_BindConfig, BindConfig__Output as _envoy_api_v2_core_BindConfig__Output } from '../../../envoy/api/v2/core/BindConfig'; -import type { TransportSocket as _envoy_api_v2_core_TransportSocket, TransportSocket__Output as _envoy_api_v2_core_TransportSocket__Output } from '../../../envoy/api/v2/core/TransportSocket'; -import type { Metadata as _envoy_api_v2_core_Metadata, Metadata__Output as _envoy_api_v2_core_Metadata__Output } from '../../../envoy/api/v2/core/Metadata'; -import type { HttpProtocolOptions as _envoy_api_v2_core_HttpProtocolOptions, HttpProtocolOptions__Output as _envoy_api_v2_core_HttpProtocolOptions__Output } from '../../../envoy/api/v2/core/HttpProtocolOptions'; -import type { UpstreamConnectionOptions as _envoy_api_v2_UpstreamConnectionOptions, UpstreamConnectionOptions__Output as _envoy_api_v2_UpstreamConnectionOptions__Output } from '../../../envoy/api/v2/UpstreamConnectionOptions'; -import type { ClusterLoadAssignment as _envoy_api_v2_ClusterLoadAssignment, ClusterLoadAssignment__Output as _envoy_api_v2_ClusterLoadAssignment__Output } from '../../../envoy/api/v2/ClusterLoadAssignment'; -import type { Struct as _google_protobuf_Struct, Struct__Output as _google_protobuf_Struct__Output } from '../../../google/protobuf/Struct'; -import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../google/protobuf/Any'; -import 
type { Filter as _envoy_api_v2_cluster_Filter, Filter__Output as _envoy_api_v2_cluster_Filter__Output } from '../../../envoy/api/v2/cluster/Filter'; -import type { LoadBalancingPolicy as _envoy_api_v2_LoadBalancingPolicy, LoadBalancingPolicy__Output as _envoy_api_v2_LoadBalancingPolicy__Output } from '../../../envoy/api/v2/LoadBalancingPolicy'; -import type { ConfigSource as _envoy_api_v2_core_ConfigSource, ConfigSource__Output as _envoy_api_v2_core_ConfigSource__Output } from '../../../envoy/api/v2/core/ConfigSource'; -import type { UpstreamHttpProtocolOptions as _envoy_api_v2_core_UpstreamHttpProtocolOptions, UpstreamHttpProtocolOptions__Output as _envoy_api_v2_core_UpstreamHttpProtocolOptions__Output } from '../../../envoy/api/v2/core/UpstreamHttpProtocolOptions'; -import type { UInt64Value as _google_protobuf_UInt64Value, UInt64Value__Output as _google_protobuf_UInt64Value__Output } from '../../../google/protobuf/UInt64Value'; -import type { Percent as _envoy_type_Percent, Percent__Output as _envoy_type_Percent__Output } from '../../../envoy/type/Percent'; -import type { Long } from '@grpc/proto-loader'; - -// Original file: deps/envoy-api/envoy/api/v2/cluster.proto - -export enum _envoy_api_v2_Cluster_ClusterProtocolSelection { - /** - * Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2). - * If :ref:`http2_protocol_options ` are - * present, HTTP2 will be used, otherwise HTTP1.1 will be used. - */ - USE_CONFIGURED_PROTOCOL = 0, - /** - * Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection. - */ - USE_DOWNSTREAM_PROTOCOL = 1, -} - -/** - * Common configuration for all load balancer implementations. - * [#next-free-field: 8] - */ -export interface _envoy_api_v2_Cluster_CommonLbConfig { - /** - * Configures the :ref:`healthy panic threshold `. - * If not specified, the default is 50%. - * To disable panic mode, set to 0%. - * - * .. note:: - * The specified percent will be truncated to the nearest 1%. - */ - 'healthy_panic_threshold'?: (_envoy_type_Percent); - 'zone_aware_lb_config'?: (_envoy_api_v2_Cluster_CommonLbConfig_ZoneAwareLbConfig); - 'locality_weighted_lb_config'?: (_envoy_api_v2_Cluster_CommonLbConfig_LocalityWeightedLbConfig); - /** - * If set, all health check/weight/metadata updates that happen within this duration will be - * merged and delivered in one shot when the duration expires. The start of the duration is when - * the first update happens. This is useful for big clusters, with potentially noisy deploys - * that might trigger excessive CPU usage due to a constant stream of healthcheck state changes - * or metadata updates. The first set of updates to be seen apply immediately (e.g.: a new - * cluster). Please always keep in mind that the use of sandbox technologies may change this - * behavior. - * - * If this is not set, we default to a merge window of 1000ms. To disable it, set the merge - * window to 0. - * - * Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is - * because merging those updates isn't currently safe. See - * https://github.com/envoyproxy/envoy/pull/3941. - */ - 'update_merge_window'?: (_google_protobuf_Duration); - /** - * If set to true, Envoy will not consider new hosts when computing load balancing weights until - * they have been health checked for the first time. This will have no effect unless - * active health checking is also configured. 
- * - * Ignoring a host means that for any load balancing calculations that adjust weights based - * on the ratio of eligible hosts and total hosts (priority spillover, locality weighting and - * panic mode) Envoy will exclude these hosts in the denominator. - * - * For example, with hosts in two priorities P0 and P1, where P0 looks like - * {healthy, unhealthy (new), unhealthy (new)} - * and where P1 looks like - * {healthy, healthy} - * all traffic will still hit P0, as 1 / (3 - 2) = 1. - * - * Enabling this will allow scaling up the number of hosts for a given cluster without entering - * panic mode or triggering priority spillover, assuming the hosts pass the first health check. - * - * If panic mode is triggered, new hosts are still eligible for traffic; they simply do not - * contribute to the calculation when deciding whether panic mode is enabled or not. - */ - 'ignore_new_hosts_until_first_hc'?: (boolean); - /** - * If set to `true`, the cluster manager will drain all existing - * connections to upstream hosts whenever hosts are added or removed from the cluster. - */ - 'close_connections_on_host_set_change'?: (boolean); - /** - * Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) - */ - 'consistent_hashing_lb_config'?: (_envoy_api_v2_Cluster_CommonLbConfig_ConsistentHashingLbConfig); - 'locality_config_specifier'?: "zone_aware_lb_config"|"locality_weighted_lb_config"; -} - -/** - * Common configuration for all load balancer implementations. - * [#next-free-field: 8] - */ -export interface _envoy_api_v2_Cluster_CommonLbConfig__Output { - /** - * Configures the :ref:`healthy panic threshold `. - * If not specified, the default is 50%. - * To disable panic mode, set to 0%. - * - * .. note:: - * The specified percent will be truncated to the nearest 1%. - */ - 'healthy_panic_threshold'?: (_envoy_type_Percent__Output); - 'zone_aware_lb_config'?: (_envoy_api_v2_Cluster_CommonLbConfig_ZoneAwareLbConfig__Output); - 'locality_weighted_lb_config'?: (_envoy_api_v2_Cluster_CommonLbConfig_LocalityWeightedLbConfig__Output); - /** - * If set, all health check/weight/metadata updates that happen within this duration will be - * merged and delivered in one shot when the duration expires. The start of the duration is when - * the first update happens. This is useful for big clusters, with potentially noisy deploys - * that might trigger excessive CPU usage due to a constant stream of healthcheck state changes - * or metadata updates. The first set of updates to be seen apply immediately (e.g.: a new - * cluster). Please always keep in mind that the use of sandbox technologies may change this - * behavior. - * - * If this is not set, we default to a merge window of 1000ms. To disable it, set the merge - * window to 0. - * - * Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is - * because merging those updates isn't currently safe. See - * https://github.com/envoyproxy/envoy/pull/3941. - */ - 'update_merge_window'?: (_google_protobuf_Duration__Output); - /** - * If set to true, Envoy will not consider new hosts when computing load balancing weights until - * they have been health checked for the first time. This will have no effect unless - * active health checking is also configured. 
- * - * Ignoring a host means that for any load balancing calculations that adjust weights based - * on the ratio of eligible hosts and total hosts (priority spillover, locality weighting and - * panic mode) Envoy will exclude these hosts in the denominator. - * - * For example, with hosts in two priorities P0 and P1, where P0 looks like - * {healthy, unhealthy (new), unhealthy (new)} - * and where P1 looks like - * {healthy, healthy} - * all traffic will still hit P0, as 1 / (3 - 2) = 1. - * - * Enabling this will allow scaling up the number of hosts for a given cluster without entering - * panic mode or triggering priority spillover, assuming the hosts pass the first health check. - * - * If panic mode is triggered, new hosts are still eligible for traffic; they simply do not - * contribute to the calculation when deciding whether panic mode is enabled or not. - */ - 'ignore_new_hosts_until_first_hc': (boolean); - /** - * If set to `true`, the cluster manager will drain all existing - * connections to upstream hosts whenever hosts are added or removed from the cluster. - */ - 'close_connections_on_host_set_change': (boolean); - /** - * Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) - */ - 'consistent_hashing_lb_config'?: (_envoy_api_v2_Cluster_CommonLbConfig_ConsistentHashingLbConfig__Output); - 'locality_config_specifier': "zone_aware_lb_config"|"locality_weighted_lb_config"; -} - -/** - * Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) - */ -export interface _envoy_api_v2_Cluster_CommonLbConfig_ConsistentHashingLbConfig { - /** - * If set to `true`, the cluster will use hostname instead of the resolved - * address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address. - */ - 'use_hostname_for_hashing'?: (boolean); -} - -/** - * Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) - */ -export interface _envoy_api_v2_Cluster_CommonLbConfig_ConsistentHashingLbConfig__Output { - /** - * If set to `true`, the cluster will use hostname instead of the resolved - * address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address. - */ - 'use_hostname_for_hashing': (boolean); -} - -/** - * Extended cluster type. - */ -export interface _envoy_api_v2_Cluster_CustomClusterType { - /** - * The type of the cluster to instantiate. The name must match a supported cluster type. - */ - 'name'?: (string); - /** - * Cluster specific configuration which depends on the cluster being instantiated. - * See the supported cluster for further documentation. - */ - 'typed_config'?: (_google_protobuf_Any); -} - -/** - * Extended cluster type. - */ -export interface _envoy_api_v2_Cluster_CustomClusterType__Output { - /** - * The type of the cluster to instantiate. The name must match a supported cluster type. - */ - 'name': (string); - /** - * Cluster specific configuration which depends on the cluster being instantiated. - * See the supported cluster for further documentation. - */ - 'typed_config'?: (_google_protobuf_Any__Output); -} - -// Original file: deps/envoy-api/envoy/api/v2/cluster.proto - -/** - * Refer to :ref:`service discovery type ` - * for an explanation on each type. - */ -export enum _envoy_api_v2_Cluster_DiscoveryType { - /** - * Refer to the :ref:`static discovery type` - * for an explanation. 
- */ - STATIC = 0, - /** - * Refer to the :ref:`strict DNS discovery - * type` - * for an explanation. - */ - STRICT_DNS = 1, - /** - * Refer to the :ref:`logical DNS discovery - * type` - * for an explanation. - */ - LOGICAL_DNS = 2, - /** - * Refer to the :ref:`service discovery type` - * for an explanation. - */ - EDS = 3, - /** - * Refer to the :ref:`original destination discovery - * type` - * for an explanation. - */ - ORIGINAL_DST = 4, -} - -// Original file: deps/envoy-api/envoy/api/v2/cluster.proto - -/** - * When V4_ONLY is selected, the DNS resolver will only perform a lookup for - * addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will - * only perform a lookup for addresses in the IPv6 family. If AUTO is - * specified, the DNS resolver will first perform a lookup for addresses in - * the IPv6 family and fallback to a lookup for addresses in the IPv4 family. - * For cluster types other than - * :ref:`STRICT_DNS` and - * :ref:`LOGICAL_DNS`, - * this setting is - * ignored. - */ -export enum _envoy_api_v2_Cluster_DnsLookupFamily { - AUTO = 0, - V4_ONLY = 1, - V6_ONLY = 2, -} - -/** - * Only valid when discovery type is EDS. - */ -export interface _envoy_api_v2_Cluster_EdsClusterConfig { - /** - * Configuration for the source of EDS updates for this Cluster. - */ - 'eds_config'?: (_envoy_api_v2_core_ConfigSource); - /** - * Optional alternative to cluster name to present to EDS. This does not - * have the same restrictions as cluster name, i.e. it may be arbitrary - * length. - */ - 'service_name'?: (string); -} - -/** - * Only valid when discovery type is EDS. - */ -export interface _envoy_api_v2_Cluster_EdsClusterConfig__Output { - /** - * Configuration for the source of EDS updates for this Cluster. - */ - 'eds_config'?: (_envoy_api_v2_core_ConfigSource__Output); - /** - * Optional alternative to cluster name to present to EDS. This does not - * have the same restrictions as cluster name, i.e. it may be arbitrary - * length. - */ - 'service_name': (string); -} - -// Original file: deps/envoy-api/envoy/api/v2/cluster.proto - -/** - * The hash function used to hash hosts onto the ketama ring. - */ -export enum _envoy_api_v2_Cluster_RingHashLbConfig_HashFunction { - /** - * Use `xxHash `_, this is the default hash function. - */ - XX_HASH = 0, - /** - * Use `MurmurHash2 `_, this is compatible with - * std:hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled - * on Linux and not macOS. - */ - MURMUR_HASH_2 = 1, -} - -// Original file: deps/envoy-api/envoy/api/v2/cluster.proto - -/** - * Refer to :ref:`load balancer type ` architecture - * overview section for information on each type. - */ -export enum _envoy_api_v2_Cluster_LbPolicy { - /** - * Refer to the :ref:`round robin load balancing - * policy` - * for an explanation. - */ - ROUND_ROBIN = 0, - /** - * Refer to the :ref:`least request load balancing - * policy` - * for an explanation. - */ - LEAST_REQUEST = 1, - /** - * Refer to the :ref:`ring hash load balancing - * policy` - * for an explanation. - */ - RING_HASH = 2, - /** - * Refer to the :ref:`random load balancing - * policy` - * for an explanation. - */ - RANDOM = 3, - /** - * Refer to the :ref:`original destination load balancing - * policy` - * for an explanation. - * - * .. attention:: - * - * **This load balancing policy is deprecated**. Use CLUSTER_PROVIDED instead. - */ - ORIGINAL_DST_LB = 4, - /** - * Refer to the :ref:`Maglev load balancing policy` - * for an explanation. 
- */ - MAGLEV = 5, - /** - * This load balancer type must be specified if the configured cluster provides a cluster - * specific load balancer. Consult the configured cluster's documentation for whether to set - * this option or not. - */ - CLUSTER_PROVIDED = 6, - /** - * [#not-implemented-hide:] Use the new :ref:`load_balancing_policy - * ` field to determine the LB policy. - * [#next-major-version: In the v3 API, we should consider deprecating the lb_policy field - * and instead using the new load_balancing_policy field as the one and only mechanism for - * configuring this.] - */ - LOAD_BALANCING_POLICY_CONFIG = 7, -} - -/** - * Optionally divide the endpoints in this cluster into subsets defined by - * endpoint metadata and selected by route and weighted cluster metadata. - * [#next-free-field: 8] - */ -export interface _envoy_api_v2_Cluster_LbSubsetConfig { - /** - * The behavior used when no endpoint subset matches the selected route's - * metadata. The value defaults to - * :ref:`NO_FALLBACK`. - */ - 'fallback_policy'?: (_envoy_api_v2_Cluster_LbSubsetConfig_LbSubsetFallbackPolicy | keyof typeof _envoy_api_v2_Cluster_LbSubsetConfig_LbSubsetFallbackPolicy); - /** - * Specifies the default subset of endpoints used during fallback if - * fallback_policy is - * :ref:`DEFAULT_SUBSET`. - * Each field in default_subset is - * compared to the matching LbEndpoint.Metadata under the *envoy.lb* - * namespace. It is valid for no hosts to match, in which case the behavior - * is the same as a fallback_policy of - * :ref:`NO_FALLBACK`. - */ - 'default_subset'?: (_google_protobuf_Struct); - /** - * For each entry, LbEndpoint.Metadata's - * *envoy.lb* namespace is traversed and a subset is created for each unique - * combination of key and value. For example: - * - * .. code-block:: json - * - * { "subset_selectors": [ - * { "keys": [ "version" ] }, - * { "keys": [ "stage", "hardware_type" ] } - * ]} - * - * A subset is matched when the metadata from the selected route and - * weighted cluster contains the same keys and values as the subset's - * metadata. The same host may appear in multiple subsets. - */ - 'subset_selectors'?: (_envoy_api_v2_Cluster_LbSubsetConfig_LbSubsetSelector)[]; - /** - * If true, routing to subsets will take into account the localities and locality weights of the - * endpoints when making the routing decision. - * - * There are some potential pitfalls associated with enabling this feature, as the resulting - * traffic split after applying both a subset match and locality weights might be undesirable. - * - * Consider for example a situation in which you have 50/50 split across two localities X/Y - * which have 100 hosts each without subsetting. If the subset LB results in X having only 1 - * host selected but Y having 100, then a lot more load is being dumped on the single host in X - * than originally anticipated in the load balancing assignment delivered via EDS. - */ - 'locality_weight_aware'?: (boolean); - /** - * When used with locality_weight_aware, scales the weight of each locality by the ratio - * of hosts in the subset vs hosts in the original subset. This aims to even out the load - * going to an individual locality if said locality is disproportionately affected by the - * subset predicate. - */ - 'scale_locality_weight'?: (boolean); - /** - * If true, when a fallback policy is configured and its corresponding subset fails to find - * a host this will cause any host to be selected instead. 
- * - * This is useful when using the default subset as the fallback policy, given the default - * subset might become empty. With this option enabled, if that happens the LB will attempt - * to select a host from the entire cluster. - */ - 'panic_mode_any'?: (boolean); - /** - * If true, metadata specified for a metadata key will be matched against the corresponding - * endpoint metadata if the endpoint metadata matches the value exactly OR it is a list value - * and any of the elements in the list matches the criteria. - */ - 'list_as_any'?: (boolean); -} - -/** - * Optionally divide the endpoints in this cluster into subsets defined by - * endpoint metadata and selected by route and weighted cluster metadata. - * [#next-free-field: 8] - */ -export interface _envoy_api_v2_Cluster_LbSubsetConfig__Output { - /** - * The behavior used when no endpoint subset matches the selected route's - * metadata. The value defaults to - * :ref:`NO_FALLBACK`. - */ - 'fallback_policy': (keyof typeof _envoy_api_v2_Cluster_LbSubsetConfig_LbSubsetFallbackPolicy); - /** - * Specifies the default subset of endpoints used during fallback if - * fallback_policy is - * :ref:`DEFAULT_SUBSET`. - * Each field in default_subset is - * compared to the matching LbEndpoint.Metadata under the *envoy.lb* - * namespace. It is valid for no hosts to match, in which case the behavior - * is the same as a fallback_policy of - * :ref:`NO_FALLBACK`. - */ - 'default_subset'?: (_google_protobuf_Struct__Output); - /** - * For each entry, LbEndpoint.Metadata's - * *envoy.lb* namespace is traversed and a subset is created for each unique - * combination of key and value. For example: - * - * .. code-block:: json - * - * { "subset_selectors": [ - * { "keys": [ "version" ] }, - * { "keys": [ "stage", "hardware_type" ] } - * ]} - * - * A subset is matched when the metadata from the selected route and - * weighted cluster contains the same keys and values as the subset's - * metadata. The same host may appear in multiple subsets. - */ - 'subset_selectors': (_envoy_api_v2_Cluster_LbSubsetConfig_LbSubsetSelector__Output)[]; - /** - * If true, routing to subsets will take into account the localities and locality weights of the - * endpoints when making the routing decision. - * - * There are some potential pitfalls associated with enabling this feature, as the resulting - * traffic split after applying both a subset match and locality weights might be undesirable. - * - * Consider for example a situation in which you have 50/50 split across two localities X/Y - * which have 100 hosts each without subsetting. If the subset LB results in X having only 1 - * host selected but Y having 100, then a lot more load is being dumped on the single host in X - * than originally anticipated in the load balancing assignment delivered via EDS. - */ - 'locality_weight_aware': (boolean); - /** - * When used with locality_weight_aware, scales the weight of each locality by the ratio - * of hosts in the subset vs hosts in the original subset. This aims to even out the load - * going to an individual locality if said locality is disproportionately affected by the - * subset predicate. - */ - 'scale_locality_weight': (boolean); - /** - * If true, when a fallback policy is configured and its corresponding subset fails to find - * a host this will cause any host to be selected instead. - * - * This is useful when using the default subset as the fallback policy, given the default - * subset might become empty. 
With this option enabled, if that happens the LB will attempt - * to select a host from the entire cluster. - */ - 'panic_mode_any': (boolean); - /** - * If true, metadata specified for a metadata key will be matched against the corresponding - * endpoint metadata if the endpoint metadata matches the value exactly OR it is a list value - * and any of the elements in the list matches the criteria. - */ - 'list_as_any': (boolean); -} - -// Original file: deps/envoy-api/envoy/api/v2/cluster.proto - -/** - * If NO_FALLBACK is selected, a result - * equivalent to no healthy hosts is reported. If ANY_ENDPOINT is selected, - * any cluster endpoint may be returned (subject to policy, health checks, - * etc). If DEFAULT_SUBSET is selected, load balancing is performed over the - * endpoints matching the values from the default_subset field. - */ -export enum _envoy_api_v2_Cluster_LbSubsetConfig_LbSubsetFallbackPolicy { - NO_FALLBACK = 0, - ANY_ENDPOINT = 1, - DEFAULT_SUBSET = 2, -} - -/** - * Specifications for subsets. - */ -export interface _envoy_api_v2_Cluster_LbSubsetConfig_LbSubsetSelector { - /** - * List of keys to match with the weighted cluster metadata. - */ - 'keys'?: (string)[]; - /** - * The behavior used when no endpoint subset matches the selected route's - * metadata. - */ - 'fallback_policy'?: (_envoy_api_v2_Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy | keyof typeof _envoy_api_v2_Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy); - /** - * Subset of - * :ref:`keys` used by - * :ref:`KEYS_SUBSET` - * fallback policy. - * It has to be a non empty list if KEYS_SUBSET fallback policy is selected. - * For any other fallback policy the parameter is not used and should not be set. - * Only values also present in - * :ref:`keys` are allowed, but - * `fallback_keys_subset` cannot be equal to `keys`. - */ - 'fallback_keys_subset'?: (string)[]; -} - -/** - * Specifications for subsets. - */ -export interface _envoy_api_v2_Cluster_LbSubsetConfig_LbSubsetSelector__Output { - /** - * List of keys to match with the weighted cluster metadata. - */ - 'keys': (string)[]; - /** - * The behavior used when no endpoint subset matches the selected route's - * metadata. - */ - 'fallback_policy': (keyof typeof _envoy_api_v2_Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy); - /** - * Subset of - * :ref:`keys` used by - * :ref:`KEYS_SUBSET` - * fallback policy. - * It has to be a non empty list if KEYS_SUBSET fallback policy is selected. - * For any other fallback policy the parameter is not used and should not be set. - * Only values also present in - * :ref:`keys` are allowed, but - * `fallback_keys_subset` cannot be equal to `keys`. - */ - 'fallback_keys_subset': (string)[]; -} - -// Original file: deps/envoy-api/envoy/api/v2/cluster.proto - -/** - * Allows to override top level fallback policy per selector. - */ -export enum _envoy_api_v2_Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy { - /** - * If NOT_DEFINED top level config fallback policy is used instead. - */ - NOT_DEFINED = 0, - /** - * If NO_FALLBACK is selected, a result equivalent to no healthy hosts is reported. - */ - NO_FALLBACK = 1, - /** - * If ANY_ENDPOINT is selected, any cluster endpoint may be returned - * (subject to policy, health checks, etc). - */ - ANY_ENDPOINT = 2, - /** - * If DEFAULT_SUBSET is selected, load balancing is performed over the - * endpoints matching the values from the default_subset field. 
- */ - DEFAULT_SUBSET = 3, - /** - * If KEYS_SUBSET is selected, subset selector matching is performed again with metadata - * keys reduced to - * :ref:`fallback_keys_subset`. - * It allows for a fallback to a different, less specific selector if some of the keys of - * the selector are considered optional. - */ - KEYS_SUBSET = 4, -} - -/** - * Specific configuration for the LeastRequest load balancing policy. - */ -export interface _envoy_api_v2_Cluster_LeastRequestLbConfig { - /** - * The number of random healthy hosts from which the host with the fewest active requests will - * be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. - */ - 'choice_count'?: (_google_protobuf_UInt32Value); -} - -/** - * Specific configuration for the LeastRequest load balancing policy. - */ -export interface _envoy_api_v2_Cluster_LeastRequestLbConfig__Output { - /** - * The number of random healthy hosts from which the host with the fewest active requests will - * be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. - */ - 'choice_count'?: (_google_protobuf_UInt32Value__Output); -} - -/** - * Configuration for :ref:`locality weighted load balancing - * ` - */ -export interface _envoy_api_v2_Cluster_CommonLbConfig_LocalityWeightedLbConfig { -} - -/** - * Configuration for :ref:`locality weighted load balancing - * ` - */ -export interface _envoy_api_v2_Cluster_CommonLbConfig_LocalityWeightedLbConfig__Output { -} - -/** - * Specific configuration for the - * :ref:`Original Destination ` - * load balancing policy. - */ -export interface _envoy_api_v2_Cluster_OriginalDstLbConfig { - /** - * When true, :ref:`x-envoy-original-dst-host - * ` can be used to override destination - * address. - * - * .. attention:: - * - * This header isn't sanitized by default, so enabling this feature allows HTTP clients to - * route traffic to arbitrary hosts and/or ports, which may have serious security - * consequences. - */ - 'use_http_header'?: (boolean); -} - -/** - * Specific configuration for the - * :ref:`Original Destination ` - * load balancing policy. - */ -export interface _envoy_api_v2_Cluster_OriginalDstLbConfig__Output { - /** - * When true, :ref:`x-envoy-original-dst-host - * ` can be used to override destination - * address. - * - * .. attention:: - * - * This header isn't sanitized by default, so enabling this feature allows HTTP clients to - * route traffic to arbitrary hosts and/or ports, which may have serious security - * consequences. - */ - 'use_http_header': (boolean); -} - -export interface _envoy_api_v2_Cluster_RefreshRate { - /** - * Specifies the base interval between refreshes. This parameter is required and must be greater - * than zero and less than - * :ref:`max_interval `. - */ - 'base_interval'?: (_google_protobuf_Duration); - /** - * Specifies the maximum interval between refreshes. This parameter is optional, but must be - * greater than or equal to the - * :ref:`base_interval ` if set. The default - * is 10 times the :ref:`base_interval `. - */ - 'max_interval'?: (_google_protobuf_Duration); -} - -export interface _envoy_api_v2_Cluster_RefreshRate__Output { - /** - * Specifies the base interval between refreshes. This parameter is required and must be greater - * than zero and less than - * :ref:`max_interval `. - */ - 'base_interval'?: (_google_protobuf_Duration__Output); - /** - * Specifies the maximum interval between refreshes. 
This parameter is optional, but must be - * greater than or equal to the - * :ref:`base_interval ` if set. The default - * is 10 times the :ref:`base_interval `. - */ - 'max_interval'?: (_google_protobuf_Duration__Output); -} - -/** - * Specific configuration for the :ref:`RingHash` - * load balancing policy. - */ -export interface _envoy_api_v2_Cluster_RingHashLbConfig { - /** - * Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each - * provided host) the better the request distribution will reflect the desired weights. Defaults - * to 1024 entries, and limited to 8M entries. See also - * :ref:`maximum_ring_size`. - */ - 'minimum_ring_size'?: (_google_protobuf_UInt64Value); - /** - * The hash function used to hash hosts onto the ketama ring. The value defaults to - * :ref:`XX_HASH`. - */ - 'hash_function'?: (_envoy_api_v2_Cluster_RingHashLbConfig_HashFunction | keyof typeof _envoy_api_v2_Cluster_RingHashLbConfig_HashFunction); - /** - * Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered - * to further constrain resource use. See also - * :ref:`minimum_ring_size`. - */ - 'maximum_ring_size'?: (_google_protobuf_UInt64Value); -} - -/** - * Specific configuration for the :ref:`RingHash` - * load balancing policy. - */ -export interface _envoy_api_v2_Cluster_RingHashLbConfig__Output { - /** - * Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each - * provided host) the better the request distribution will reflect the desired weights. Defaults - * to 1024 entries, and limited to 8M entries. See also - * :ref:`maximum_ring_size`. - */ - 'minimum_ring_size'?: (_google_protobuf_UInt64Value__Output); - /** - * The hash function used to hash hosts onto the ketama ring. The value defaults to - * :ref:`XX_HASH`. - */ - 'hash_function': (keyof typeof _envoy_api_v2_Cluster_RingHashLbConfig_HashFunction); - /** - * Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered - * to further constrain resource use. See also - * :ref:`minimum_ring_size`. - */ - 'maximum_ring_size'?: (_google_protobuf_UInt64Value__Output); -} - -/** - * TransportSocketMatch specifies what transport socket config will be used - * when the match conditions are satisfied. - */ -export interface _envoy_api_v2_Cluster_TransportSocketMatch { - /** - * The name of the match, used in stats generation. - */ - 'name'?: (string); - /** - * Optional endpoint metadata match criteria. - * The connection to the endpoint with metadata matching what is set in this field - * will use the transport socket configuration specified here. - * The endpoint's metadata entry in *envoy.transport_socket_match* is used to match - * against the values specified in this field. - */ - 'match'?: (_google_protobuf_Struct); - /** - * The configuration of the transport socket. - */ - 'transport_socket'?: (_envoy_api_v2_core_TransportSocket); -} - -/** - * TransportSocketMatch specifies what transport socket config will be used - * when the match conditions are satisfied. - */ -export interface _envoy_api_v2_Cluster_TransportSocketMatch__Output { - /** - * The name of the match, used in stats generation. - */ - 'name': (string); - /** - * Optional endpoint metadata match criteria. - * The connection to the endpoint with metadata matching what is set in this field - * will use the transport socket configuration specified here. 
- * The endpoint's metadata entry in *envoy.transport_socket_match* is used to match - * against the values specified in this field. - */ - 'match'?: (_google_protobuf_Struct__Output); - /** - * The configuration of the transport socket. - */ - 'transport_socket'?: (_envoy_api_v2_core_TransportSocket__Output); -} - -/** - * Configuration for :ref:`zone aware routing - * `. - */ -export interface _envoy_api_v2_Cluster_CommonLbConfig_ZoneAwareLbConfig { - /** - * Configures percentage of requests that will be considered for zone aware routing - * if zone aware routing is configured. If not specified, the default is 100%. - * * :ref:`runtime values `. - * * :ref:`Zone aware routing support `. - */ - 'routing_enabled'?: (_envoy_type_Percent); - /** - * Configures minimum upstream cluster size required for zone aware routing - * If upstream cluster size is less than specified, zone aware routing is not performed - * even if zone aware routing is configured. If not specified, the default is 6. - * * :ref:`runtime values `. - * * :ref:`Zone aware routing support `. - */ - 'min_cluster_size'?: (_google_protobuf_UInt64Value); - /** - * If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic - * mode`. Instead, the cluster will fail all - * requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a - * failing service. - */ - 'fail_traffic_on_panic'?: (boolean); -} - -/** - * Configuration for :ref:`zone aware routing - * `. - */ -export interface _envoy_api_v2_Cluster_CommonLbConfig_ZoneAwareLbConfig__Output { - /** - * Configures percentage of requests that will be considered for zone aware routing - * if zone aware routing is configured. If not specified, the default is 100%. - * * :ref:`runtime values `. - * * :ref:`Zone aware routing support `. - */ - 'routing_enabled'?: (_envoy_type_Percent__Output); - /** - * Configures minimum upstream cluster size required for zone aware routing - * If upstream cluster size is less than specified, zone aware routing is not performed - * even if zone aware routing is configured. If not specified, the default is 6. - * * :ref:`runtime values `. - * * :ref:`Zone aware routing support `. - */ - 'min_cluster_size'?: (_google_protobuf_UInt64Value__Output); - /** - * If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic - * mode`. Instead, the cluster will fail all - * requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a - * failing service. - */ - 'fail_traffic_on_panic': (boolean); -} - -/** - * Configuration for a single upstream cluster. - * [#next-free-field: 48] - */ -export interface Cluster { - /** - * Supplies the name of the cluster which must be unique across all clusters. - * The cluster name is used when emitting - * :ref:`statistics ` if :ref:`alt_stat_name - * ` is not provided. - * Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics. - */ - 'name'?: (string); - /** - * The :ref:`service discovery type ` - * to use for resolving the cluster. - */ - 'type'?: (_envoy_api_v2_Cluster_DiscoveryType | keyof typeof _envoy_api_v2_Cluster_DiscoveryType); - /** - * Configuration to use for EDS updates for the Cluster. - */ - 'eds_cluster_config'?: (_envoy_api_v2_Cluster_EdsClusterConfig); - /** - * The timeout for new network connections to hosts in the cluster. - */ - 'connect_timeout'?: (_google_protobuf_Duration); - /** - * Soft limit on size of the cluster’s connections read and write buffers. 
If - * unspecified, an implementation defined default is applied (1MiB). - */ - 'per_connection_buffer_limit_bytes'?: (_google_protobuf_UInt32Value); - /** - * The :ref:`load balancer type ` to use - * when picking a host in the cluster. - */ - 'lb_policy'?: (_envoy_api_v2_Cluster_LbPolicy | keyof typeof _envoy_api_v2_Cluster_LbPolicy); - /** - * If the service discovery type is - * :ref:`STATIC`, - * :ref:`STRICT_DNS` - * or :ref:`LOGICAL_DNS`, - * then hosts is required. - * - * .. attention:: - * - * **This field is deprecated**. Set the - * :ref:`load_assignment` field instead. - */ - 'hosts'?: (_envoy_api_v2_core_Address)[]; - /** - * Optional :ref:`active health checking ` - * configuration for the cluster. If no - * configuration is specified no health checking will be done and all cluster - * members will be considered healthy at all times. - */ - 'health_checks'?: (_envoy_api_v2_core_HealthCheck)[]; - /** - * Optional maximum requests for a single upstream connection. This parameter - * is respected by both the HTTP/1.1 and HTTP/2 connection pool - * implementations. If not specified, there is no limit. Setting this - * parameter to 1 will effectively disable keep alive. - */ - 'max_requests_per_connection'?: (_google_protobuf_UInt32Value); - /** - * Optional :ref:`circuit breaking ` for the cluster. - */ - 'circuit_breakers'?: (_envoy_api_v2_cluster_CircuitBreakers); - /** - * The TLS configuration for connections to the upstream cluster. - * - * .. attention:: - * - * **This field is deprecated**. Use `transport_socket` with name `tls` instead. If both are - * set, `transport_socket` takes priority. - */ - 'tls_context'?: (_envoy_api_v2_auth_UpstreamTlsContext); - /** - * Additional options when handling HTTP1 requests. - */ - 'http_protocol_options'?: (_envoy_api_v2_core_Http1ProtocolOptions); - /** - * Even if default HTTP2 protocol options are desired, this field must be - * set so that Envoy will assume that the upstream supports HTTP/2 when - * making new HTTP connection pool connections. Currently, Envoy only - * supports prior knowledge for upstream connections. Even if TLS is used - * with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2 - * connections to happen over plain text. - */ - 'http2_protocol_options'?: (_envoy_api_v2_core_Http2ProtocolOptions); - /** - * If the DNS refresh rate is specified and the cluster type is either - * :ref:`STRICT_DNS`, - * or :ref:`LOGICAL_DNS`, - * this value is used as the cluster’s DNS refresh - * rate. The value configured must be at least 1ms. If this setting is not specified, the - * value defaults to 5000ms. For cluster types other than - * :ref:`STRICT_DNS` - * and :ref:`LOGICAL_DNS` - * this setting is ignored. - */ - 'dns_refresh_rate'?: (_google_protobuf_Duration); - /** - * The DNS IP address resolution policy. If this setting is not specified, the - * value defaults to - * :ref:`AUTO`. - */ - 'dns_lookup_family'?: (_envoy_api_v2_Cluster_DnsLookupFamily | keyof typeof _envoy_api_v2_Cluster_DnsLookupFamily); - /** - * If DNS resolvers are specified and the cluster type is either - * :ref:`STRICT_DNS`, - * or :ref:`LOGICAL_DNS`, - * this value is used to specify the cluster’s dns resolvers. - * If this setting is not specified, the value defaults to the default - * resolver, which uses /etc/resolv.conf for configuration. For cluster types - * other than - * :ref:`STRICT_DNS` - * and :ref:`LOGICAL_DNS` - * this setting is ignored. 
- */ - 'dns_resolvers'?: (_envoy_api_v2_core_Address)[]; - /** - * If specified, outlier detection will be enabled for this upstream cluster. - * Each of the configuration values can be overridden via - * :ref:`runtime values `. - */ - 'outlier_detection'?: (_envoy_api_v2_cluster_OutlierDetection); - /** - * The interval for removing stale hosts from a cluster type - * :ref:`ORIGINAL_DST`. - * Hosts are considered stale if they have not been used - * as upstream destinations during this interval. New hosts are added - * to original destination clusters on demand as new connections are - * redirected to Envoy, causing the number of hosts in the cluster to - * grow over time. Hosts that are not stale (they are actively used as - * destinations) are kept in the cluster, which allows connections to - * them remain open, saving the latency that would otherwise be spent - * on opening new connections. If this setting is not specified, the - * value defaults to 5000ms. For cluster types other than - * :ref:`ORIGINAL_DST` - * this setting is ignored. - */ - 'cleanup_interval'?: (_google_protobuf_Duration); - /** - * Optional configuration used to bind newly established upstream connections. - * This overrides any bind_config specified in the bootstrap proto. - * If the address and port are empty, no bind will be performed. - */ - 'upstream_bind_config'?: (_envoy_api_v2_core_BindConfig); - /** - * Configuration for load balancing subsetting. - */ - 'lb_subset_config'?: (_envoy_api_v2_Cluster_LbSubsetConfig); - /** - * Optional configuration for the Ring Hash load balancing policy. - */ - 'ring_hash_lb_config'?: (_envoy_api_v2_Cluster_RingHashLbConfig); - /** - * Optional custom transport socket implementation to use for upstream connections. - * To setup TLS, set a transport socket with name `tls` and - * :ref:`UpstreamTlsContexts ` in the `typed_config`. - * If no transport socket configuration is specified, new connections - * will be set up with plaintext. - */ - 'transport_socket'?: (_envoy_api_v2_core_TransportSocket); - /** - * The Metadata field can be used to provide additional information about the - * cluster. It can be used for stats, logging, and varying filter behavior. - * Fields should use reverse DNS notation to denote which entity within Envoy - * will need the information. For instance, if the metadata is intended for - * the Router filter, the filter name should be specified as *envoy.filters.http.router*. - */ - 'metadata'?: (_envoy_api_v2_core_Metadata); - /** - * Determines how Envoy selects the protocol used to speak to upstream hosts. - */ - 'protocol_selection'?: (_envoy_api_v2_Cluster_ClusterProtocolSelection | keyof typeof _envoy_api_v2_Cluster_ClusterProtocolSelection); - /** - * Common configuration for all load balancer implementations. - */ - 'common_lb_config'?: (_envoy_api_v2_Cluster_CommonLbConfig); - /** - * An optional alternative to the cluster name to be used while emitting stats. - * Any ``:`` in the name will be converted to ``_`` when emitting statistics. This should not be - * confused with :ref:`Router Filter Header - * `. - */ - 'alt_stat_name'?: (string); - /** - * Additional options when handling HTTP requests upstream. These options will be applicable to - * both HTTP1 and HTTP2 requests. - */ - 'common_http_protocol_options'?: (_envoy_api_v2_core_HttpProtocolOptions); - /** - * Optional options for upstream connections. 
- */ - 'upstream_connection_options'?: (_envoy_api_v2_UpstreamConnectionOptions); - /** - * If an upstream host becomes unhealthy (as determined by the configured health checks - * or outlier detection), immediately close all connections to the failed host. - * - * .. note:: - * - * This is currently only supported for connections created by tcp_proxy. - * - * .. note:: - * - * The current implementation of this feature closes all connections immediately when - * the unhealthy status is detected. If there are a large number of connections open - * to an upstream host that becomes unhealthy, Envoy may spend a substantial amount of - * time exclusively closing these connections, and not processing any other traffic. - */ - 'close_connections_on_host_health_failure'?: (boolean); - /** - * If set to true, Envoy will ignore the health value of a host when processing its removal - * from service discovery. This means that if active health checking is used, Envoy will *not* - * wait for the endpoint to go unhealthy before removing it. - */ - 'drain_connections_on_host_removal'?: (boolean); - /** - * Setting this is required for specifying members of - * :ref:`STATIC`, - * :ref:`STRICT_DNS` - * or :ref:`LOGICAL_DNS` clusters. - * This field supersedes the *hosts* field in the v2 API. - * - * .. attention:: - * - * Setting this allows non-EDS cluster types to contain embedded EDS equivalent - * :ref:`endpoint assignments`. - */ - 'load_assignment'?: (_envoy_api_v2_ClusterLoadAssignment); - /** - * Optional configuration for the Original Destination load balancing policy. - */ - 'original_dst_lb_config'?: (_envoy_api_v2_Cluster_OriginalDstLbConfig); - /** - * The extension_protocol_options field is used to provide extension-specific protocol options - * for upstream connections. The key should match the extension filter name, such as - * "envoy.filters.network.thrift_proxy". See the extension's documentation for details on - * specific options. - */ - 'extension_protocol_options'?: ({[key: string]: _google_protobuf_Struct}); - /** - * The extension_protocol_options field is used to provide extension-specific protocol options - * for upstream connections. The key should match the extension filter name, such as - * "envoy.filters.network.thrift_proxy". See the extension's documentation for details on - * specific options. - */ - 'typed_extension_protocol_options'?: ({[key: string]: _google_protobuf_Any}); - /** - * Optional configuration for the LeastRequest load balancing policy. - */ - 'least_request_lb_config'?: (_envoy_api_v2_Cluster_LeastRequestLbConfig); - /** - * The custom cluster type. - */ - 'cluster_type'?: (_envoy_api_v2_Cluster_CustomClusterType); - /** - * Optional configuration for setting cluster's DNS refresh rate. If the value is set to true, - * cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS - * resolution. - */ - 'respect_dns_ttl'?: (boolean); - /** - * An (optional) network filter chain, listed in the order the filters should be applied. - * The chain will be applied to all outgoing connections that Envoy makes to the upstream - * servers of this cluster. - */ - 'filters'?: (_envoy_api_v2_cluster_Filter)[]; - /** - * [#not-implemented-hide:] New mechanism for LB policy configuration. Used only if the - * :ref:`lb_policy` field has the value - * :ref:`LOAD_BALANCING_POLICY_CONFIG`. 
- */ - 'load_balancing_policy'?: (_envoy_api_v2_LoadBalancingPolicy); - /** - * [#not-implemented-hide:] - * If present, tells the client where to send load reports via LRS. If not present, the - * client will fall back to a client-side default, which may be either (a) don't send any - * load reports or (b) send load reports for all clusters to a single default server - * (which may be configured in the bootstrap file). - * - * Note that if multiple clusters point to the same LRS server, the client may choose to - * create a separate stream for each cluster or it may choose to coalesce the data for - * multiple clusters onto a single stream. Either way, the client must make sure to send - * the data for any given cluster on no more than one stream. - * - * [#next-major-version: In the v3 API, we should consider restructuring this somehow, - * maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation - * from the LRS stream here.] - */ - 'lrs_server'?: (_envoy_api_v2_core_ConfigSource); - /** - * Configuration to use different transport sockets for different endpoints. - * The entry of *envoy.transport_socket_match* in the - * :ref:`LbEndpoint.Metadata ` - * is used to match against the transport sockets as they appear in the list. The first - * :ref:`match ` is used. - * For example, with the following match - * - * .. code-block:: yaml - * - * transport_socket_matches: - * - name: "enableMTLS" - * match: - * acceptMTLS: true - * transport_socket: - * name: envoy.transport_sockets.tls - * config: { ... } # tls socket configuration - * - name: "defaultToPlaintext" - * match: {} - * transport_socket: - * name: envoy.transport_sockets.raw_buffer - * - * Connections to the endpoints whose metadata value under *envoy.transport_socket_match* - * having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration. - * - * If a :ref:`socket match ` with empty match - * criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext" - * socket match in case above. - * - * If an endpoint metadata's value under *envoy.transport_socket_match* does not match any - * *TransportSocketMatch*, socket configuration fallbacks to use the *tls_context* or - * *transport_socket* specified in this cluster. - * - * This field allows gradual and flexible transport socket configuration changes. - * - * The metadata of endpoints in EDS can indicate transport socket capabilities. For example, - * an endpoint's metadata can have two key value pairs as "acceptMTLS": "true", - * "acceptPlaintext": "true". While some other endpoints, only accepting plaintext traffic - * has "acceptPlaintext": "true" metadata information. - * - * Then the xDS server can configure the CDS to a client, Envoy A, to send mutual TLS - * traffic for endpoints with "acceptMTLS": "true", by adding a corresponding - * *TransportSocketMatch* in this field. Other client Envoys receive CDS without - * *transport_socket_match* set, and still send plain text traffic to the same cluster. - * - * [#comment:TODO(incfly): add a detailed architecture doc on intended usage.] - */ - 'transport_socket_matches'?: (_envoy_api_v2_Cluster_TransportSocketMatch)[]; - /** - * If the DNS failure refresh rate is specified and the cluster type is either - * :ref:`STRICT_DNS`, - * or :ref:`LOGICAL_DNS`, - * this is used as the cluster’s DNS refresh rate when requests are failing. If this setting is - * not specified, the failure refresh rate defaults to the DNS refresh rate. 
For cluster types - * other than :ref:`STRICT_DNS` and - * :ref:`LOGICAL_DNS` this setting is - * ignored. - */ - 'dns_failure_refresh_rate'?: (_envoy_api_v2_Cluster_RefreshRate); - /** - * [#next-major-version: Reconcile DNS options in a single message.] - * Always use TCP queries instead of UDP queries for DNS lookups. - */ - 'use_tcp_for_dns_lookups'?: (boolean); - /** - * HTTP protocol options that are applied only to upstream HTTP connections. - * These options apply to all HTTP versions. - */ - 'upstream_http_protocol_options'?: (_envoy_api_v2_core_UpstreamHttpProtocolOptions); - /** - * If track_timeout_budgets is true, the :ref:`timeout budget histograms - * ` will be published for each - * request. These show what percentage of a request's per try and global timeout was used. A value - * of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value - * of 100 would indicate that the request took the entirety of the timeout given to it. - */ - 'track_timeout_budgets'?: (boolean); - 'cluster_discovery_type'?: "type"|"cluster_type"; - /** - * Optional configuration for the load balancing algorithm selected by - * LbPolicy. Currently only - * :ref:`RING_HASH` and - * :ref:`LEAST_REQUEST` - * has additional configuration options. - * Specifying ring_hash_lb_config or least_request_lb_config without setting the corresponding - * LbPolicy will generate an error at runtime. - */ - 'lb_config'?: "ring_hash_lb_config"|"original_dst_lb_config"|"least_request_lb_config"; -} - -/** - * Configuration for a single upstream cluster. - * [#next-free-field: 48] - */ -export interface Cluster__Output { - /** - * Supplies the name of the cluster which must be unique across all clusters. - * The cluster name is used when emitting - * :ref:`statistics ` if :ref:`alt_stat_name - * ` is not provided. - * Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics. - */ - 'name': (string); - /** - * The :ref:`service discovery type ` - * to use for resolving the cluster. - */ - 'type'?: (keyof typeof _envoy_api_v2_Cluster_DiscoveryType); - /** - * Configuration to use for EDS updates for the Cluster. - */ - 'eds_cluster_config'?: (_envoy_api_v2_Cluster_EdsClusterConfig__Output); - /** - * The timeout for new network connections to hosts in the cluster. - */ - 'connect_timeout'?: (_google_protobuf_Duration__Output); - /** - * Soft limit on size of the cluster’s connections read and write buffers. If - * unspecified, an implementation defined default is applied (1MiB). - */ - 'per_connection_buffer_limit_bytes'?: (_google_protobuf_UInt32Value__Output); - /** - * The :ref:`load balancer type ` to use - * when picking a host in the cluster. - */ - 'lb_policy': (keyof typeof _envoy_api_v2_Cluster_LbPolicy); - /** - * If the service discovery type is - * :ref:`STATIC`, - * :ref:`STRICT_DNS` - * or :ref:`LOGICAL_DNS`, - * then hosts is required. - * - * .. attention:: - * - * **This field is deprecated**. Set the - * :ref:`load_assignment` field instead. - */ - 'hosts': (_envoy_api_v2_core_Address__Output)[]; - /** - * Optional :ref:`active health checking ` - * configuration for the cluster. If no - * configuration is specified no health checking will be done and all cluster - * members will be considered healthy at all times. - */ - 'health_checks': (_envoy_api_v2_core_HealthCheck__Output)[]; - /** - * Optional maximum requests for a single upstream connection. 
This parameter - * is respected by both the HTTP/1.1 and HTTP/2 connection pool - * implementations. If not specified, there is no limit. Setting this - * parameter to 1 will effectively disable keep alive. - */ - 'max_requests_per_connection'?: (_google_protobuf_UInt32Value__Output); - /** - * Optional :ref:`circuit breaking ` for the cluster. - */ - 'circuit_breakers'?: (_envoy_api_v2_cluster_CircuitBreakers__Output); - /** - * The TLS configuration for connections to the upstream cluster. - * - * .. attention:: - * - * **This field is deprecated**. Use `transport_socket` with name `tls` instead. If both are - * set, `transport_socket` takes priority. - */ - 'tls_context'?: (_envoy_api_v2_auth_UpstreamTlsContext__Output); - /** - * Additional options when handling HTTP1 requests. - */ - 'http_protocol_options'?: (_envoy_api_v2_core_Http1ProtocolOptions__Output); - /** - * Even if default HTTP2 protocol options are desired, this field must be - * set so that Envoy will assume that the upstream supports HTTP/2 when - * making new HTTP connection pool connections. Currently, Envoy only - * supports prior knowledge for upstream connections. Even if TLS is used - * with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2 - * connections to happen over plain text. - */ - 'http2_protocol_options'?: (_envoy_api_v2_core_Http2ProtocolOptions__Output); - /** - * If the DNS refresh rate is specified and the cluster type is either - * :ref:`STRICT_DNS`, - * or :ref:`LOGICAL_DNS`, - * this value is used as the cluster’s DNS refresh - * rate. The value configured must be at least 1ms. If this setting is not specified, the - * value defaults to 5000ms. For cluster types other than - * :ref:`STRICT_DNS` - * and :ref:`LOGICAL_DNS` - * this setting is ignored. - */ - 'dns_refresh_rate'?: (_google_protobuf_Duration__Output); - /** - * The DNS IP address resolution policy. If this setting is not specified, the - * value defaults to - * :ref:`AUTO`. - */ - 'dns_lookup_family': (keyof typeof _envoy_api_v2_Cluster_DnsLookupFamily); - /** - * If DNS resolvers are specified and the cluster type is either - * :ref:`STRICT_DNS`, - * or :ref:`LOGICAL_DNS`, - * this value is used to specify the cluster’s dns resolvers. - * If this setting is not specified, the value defaults to the default - * resolver, which uses /etc/resolv.conf for configuration. For cluster types - * other than - * :ref:`STRICT_DNS` - * and :ref:`LOGICAL_DNS` - * this setting is ignored. - */ - 'dns_resolvers': (_envoy_api_v2_core_Address__Output)[]; - /** - * If specified, outlier detection will be enabled for this upstream cluster. - * Each of the configuration values can be overridden via - * :ref:`runtime values `. - */ - 'outlier_detection'?: (_envoy_api_v2_cluster_OutlierDetection__Output); - /** - * The interval for removing stale hosts from a cluster type - * :ref:`ORIGINAL_DST`. - * Hosts are considered stale if they have not been used - * as upstream destinations during this interval. New hosts are added - * to original destination clusters on demand as new connections are - * redirected to Envoy, causing the number of hosts in the cluster to - * grow over time. Hosts that are not stale (they are actively used as - * destinations) are kept in the cluster, which allows connections to - * them remain open, saving the latency that would otherwise be spent - * on opening new connections. If this setting is not specified, the - * value defaults to 5000ms. 
For cluster types other than - * :ref:`ORIGINAL_DST` - * this setting is ignored. - */ - 'cleanup_interval'?: (_google_protobuf_Duration__Output); - /** - * Optional configuration used to bind newly established upstream connections. - * This overrides any bind_config specified in the bootstrap proto. - * If the address and port are empty, no bind will be performed. - */ - 'upstream_bind_config'?: (_envoy_api_v2_core_BindConfig__Output); - /** - * Configuration for load balancing subsetting. - */ - 'lb_subset_config'?: (_envoy_api_v2_Cluster_LbSubsetConfig__Output); - /** - * Optional configuration for the Ring Hash load balancing policy. - */ - 'ring_hash_lb_config'?: (_envoy_api_v2_Cluster_RingHashLbConfig__Output); - /** - * Optional custom transport socket implementation to use for upstream connections. - * To setup TLS, set a transport socket with name `tls` and - * :ref:`UpstreamTlsContexts ` in the `typed_config`. - * If no transport socket configuration is specified, new connections - * will be set up with plaintext. - */ - 'transport_socket'?: (_envoy_api_v2_core_TransportSocket__Output); - /** - * The Metadata field can be used to provide additional information about the - * cluster. It can be used for stats, logging, and varying filter behavior. - * Fields should use reverse DNS notation to denote which entity within Envoy - * will need the information. For instance, if the metadata is intended for - * the Router filter, the filter name should be specified as *envoy.filters.http.router*. - */ - 'metadata'?: (_envoy_api_v2_core_Metadata__Output); - /** - * Determines how Envoy selects the protocol used to speak to upstream hosts. - */ - 'protocol_selection': (keyof typeof _envoy_api_v2_Cluster_ClusterProtocolSelection); - /** - * Common configuration for all load balancer implementations. - */ - 'common_lb_config'?: (_envoy_api_v2_Cluster_CommonLbConfig__Output); - /** - * An optional alternative to the cluster name to be used while emitting stats. - * Any ``:`` in the name will be converted to ``_`` when emitting statistics. This should not be - * confused with :ref:`Router Filter Header - * `. - */ - 'alt_stat_name': (string); - /** - * Additional options when handling HTTP requests upstream. These options will be applicable to - * both HTTP1 and HTTP2 requests. - */ - 'common_http_protocol_options'?: (_envoy_api_v2_core_HttpProtocolOptions__Output); - /** - * Optional options for upstream connections. - */ - 'upstream_connection_options'?: (_envoy_api_v2_UpstreamConnectionOptions__Output); - /** - * If an upstream host becomes unhealthy (as determined by the configured health checks - * or outlier detection), immediately close all connections to the failed host. - * - * .. note:: - * - * This is currently only supported for connections created by tcp_proxy. - * - * .. note:: - * - * The current implementation of this feature closes all connections immediately when - * the unhealthy status is detected. If there are a large number of connections open - * to an upstream host that becomes unhealthy, Envoy may spend a substantial amount of - * time exclusively closing these connections, and not processing any other traffic. - */ - 'close_connections_on_host_health_failure': (boolean); - /** - * If set to true, Envoy will ignore the health value of a host when processing its removal - * from service discovery. This means that if active health checking is used, Envoy will *not* - * wait for the endpoint to go unhealthy before removing it. 
- */ - 'drain_connections_on_host_removal': (boolean); - /** - * Setting this is required for specifying members of - * :ref:`STATIC`, - * :ref:`STRICT_DNS` - * or :ref:`LOGICAL_DNS` clusters. - * This field supersedes the *hosts* field in the v2 API. - * - * .. attention:: - * - * Setting this allows non-EDS cluster types to contain embedded EDS equivalent - * :ref:`endpoint assignments`. - */ - 'load_assignment'?: (_envoy_api_v2_ClusterLoadAssignment__Output); - /** - * Optional configuration for the Original Destination load balancing policy. - */ - 'original_dst_lb_config'?: (_envoy_api_v2_Cluster_OriginalDstLbConfig__Output); - /** - * The extension_protocol_options field is used to provide extension-specific protocol options - * for upstream connections. The key should match the extension filter name, such as - * "envoy.filters.network.thrift_proxy". See the extension's documentation for details on - * specific options. - */ - 'extension_protocol_options'?: ({[key: string]: _google_protobuf_Struct__Output}); - /** - * The extension_protocol_options field is used to provide extension-specific protocol options - * for upstream connections. The key should match the extension filter name, such as - * "envoy.filters.network.thrift_proxy". See the extension's documentation for details on - * specific options. - */ - 'typed_extension_protocol_options'?: ({[key: string]: _google_protobuf_Any__Output}); - /** - * Optional configuration for the LeastRequest load balancing policy. - */ - 'least_request_lb_config'?: (_envoy_api_v2_Cluster_LeastRequestLbConfig__Output); - /** - * The custom cluster type. - */ - 'cluster_type'?: (_envoy_api_v2_Cluster_CustomClusterType__Output); - /** - * Optional configuration for setting cluster's DNS refresh rate. If the value is set to true, - * cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS - * resolution. - */ - 'respect_dns_ttl': (boolean); - /** - * An (optional) network filter chain, listed in the order the filters should be applied. - * The chain will be applied to all outgoing connections that Envoy makes to the upstream - * servers of this cluster. - */ - 'filters': (_envoy_api_v2_cluster_Filter__Output)[]; - /** - * [#not-implemented-hide:] New mechanism for LB policy configuration. Used only if the - * :ref:`lb_policy` field has the value - * :ref:`LOAD_BALANCING_POLICY_CONFIG`. - */ - 'load_balancing_policy'?: (_envoy_api_v2_LoadBalancingPolicy__Output); - /** - * [#not-implemented-hide:] - * If present, tells the client where to send load reports via LRS. If not present, the - * client will fall back to a client-side default, which may be either (a) don't send any - * load reports or (b) send load reports for all clusters to a single default server - * (which may be configured in the bootstrap file). - * - * Note that if multiple clusters point to the same LRS server, the client may choose to - * create a separate stream for each cluster or it may choose to coalesce the data for - * multiple clusters onto a single stream. Either way, the client must make sure to send - * the data for any given cluster on no more than one stream. - * - * [#next-major-version: In the v3 API, we should consider restructuring this somehow, - * maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation - * from the LRS stream here.] - */ - 'lrs_server'?: (_envoy_api_v2_core_ConfigSource__Output); - /** - * Configuration to use different transport sockets for different endpoints. 
- * The entry of *envoy.transport_socket_match* in the - * :ref:`LbEndpoint.Metadata ` - * is used to match against the transport sockets as they appear in the list. The first - * :ref:`match ` is used. - * For example, with the following match - * - * .. code-block:: yaml - * - * transport_socket_matches: - * - name: "enableMTLS" - * match: - * acceptMTLS: true - * transport_socket: - * name: envoy.transport_sockets.tls - * config: { ... } # tls socket configuration - * - name: "defaultToPlaintext" - * match: {} - * transport_socket: - * name: envoy.transport_sockets.raw_buffer - * - * Connections to the endpoints whose metadata value under *envoy.transport_socket_match* - * having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration. - * - * If a :ref:`socket match ` with empty match - * criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext" - * socket match in case above. - * - * If an endpoint metadata's value under *envoy.transport_socket_match* does not match any - * *TransportSocketMatch*, socket configuration fallbacks to use the *tls_context* or - * *transport_socket* specified in this cluster. - * - * This field allows gradual and flexible transport socket configuration changes. - * - * The metadata of endpoints in EDS can indicate transport socket capabilities. For example, - * an endpoint's metadata can have two key value pairs as "acceptMTLS": "true", - * "acceptPlaintext": "true". While some other endpoints, only accepting plaintext traffic - * has "acceptPlaintext": "true" metadata information. - * - * Then the xDS server can configure the CDS to a client, Envoy A, to send mutual TLS - * traffic for endpoints with "acceptMTLS": "true", by adding a corresponding - * *TransportSocketMatch* in this field. Other client Envoys receive CDS without - * *transport_socket_match* set, and still send plain text traffic to the same cluster. - * - * [#comment:TODO(incfly): add a detailed architecture doc on intended usage.] - */ - 'transport_socket_matches': (_envoy_api_v2_Cluster_TransportSocketMatch__Output)[]; - /** - * If the DNS failure refresh rate is specified and the cluster type is either - * :ref:`STRICT_DNS`, - * or :ref:`LOGICAL_DNS`, - * this is used as the cluster’s DNS refresh rate when requests are failing. If this setting is - * not specified, the failure refresh rate defaults to the DNS refresh rate. For cluster types - * other than :ref:`STRICT_DNS` and - * :ref:`LOGICAL_DNS` this setting is - * ignored. - */ - 'dns_failure_refresh_rate'?: (_envoy_api_v2_Cluster_RefreshRate__Output); - /** - * [#next-major-version: Reconcile DNS options in a single message.] - * Always use TCP queries instead of UDP queries for DNS lookups. - */ - 'use_tcp_for_dns_lookups': (boolean); - /** - * HTTP protocol options that are applied only to upstream HTTP connections. - * These options apply to all HTTP versions. - */ - 'upstream_http_protocol_options'?: (_envoy_api_v2_core_UpstreamHttpProtocolOptions__Output); - /** - * If track_timeout_budgets is true, the :ref:`timeout budget histograms - * ` will be published for each - * request. These show what percentage of a request's per try and global timeout was used. A value - * of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value - * of 100 would indicate that the request took the entirety of the timeout given to it. 
- */ - 'track_timeout_budgets': (boolean); - 'cluster_discovery_type': "type"|"cluster_type"; - /** - * Optional configuration for the load balancing algorithm selected by - * LbPolicy. Currently only - * :ref:`RING_HASH` and - * :ref:`LEAST_REQUEST` - * has additional configuration options. - * Specifying ring_hash_lb_config or least_request_lb_config without setting the corresponding - * LbPolicy will generate an error at runtime. - */ - 'lb_config': "ring_hash_lb_config"|"original_dst_lb_config"|"least_request_lb_config"; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/DeltaDiscoveryResponse.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/DeltaDiscoveryResponse.ts deleted file mode 100644 index 1a2584b95..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/DeltaDiscoveryResponse.ts +++ /dev/null @@ -1,63 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/discovery.proto - -import type { Resource as _envoy_api_v2_Resource, Resource__Output as _envoy_api_v2_Resource__Output } from '../../../envoy/api/v2/Resource'; - -/** - * [#next-free-field: 7] - */ -export interface DeltaDiscoveryResponse { - /** - * The version of the response data (used for debugging). - */ - 'system_version_info'?: (string); - /** - * The response resources. These are typed resources, whose types must match - * the type_url field. - */ - 'resources'?: (_envoy_api_v2_Resource)[]; - /** - * Type URL for resources. Identifies the xDS API when muxing over ADS. - * Must be consistent with the type_url in the Any within 'resources' if 'resources' is non-empty. - */ - 'type_url'?: (string); - /** - * The nonce provides a way for DeltaDiscoveryRequests to uniquely - * reference a DeltaDiscoveryResponse when (N)ACKing. The nonce is required. - */ - 'nonce'?: (string); - /** - * Resources names of resources that have be deleted and to be removed from the xDS Client. - * Removed resources for missing resources can be ignored. - */ - 'removed_resources'?: (string)[]; -} - -/** - * [#next-free-field: 7] - */ -export interface DeltaDiscoveryResponse__Output { - /** - * The version of the response data (used for debugging). - */ - 'system_version_info': (string); - /** - * The response resources. These are typed resources, whose types must match - * the type_url field. - */ - 'resources': (_envoy_api_v2_Resource__Output)[]; - /** - * Type URL for resources. Identifies the xDS API when muxing over ADS. - * Must be consistent with the type_url in the Any within 'resources' if 'resources' is non-empty. - */ - 'type_url': (string); - /** - * The nonce provides a way for DeltaDiscoveryRequests to uniquely - * reference a DeltaDiscoveryResponse when (N)ACKing. The nonce is required. - */ - 'nonce': (string); - /** - * Resources names of resources that have be deleted and to be removed from the xDS Client. - * Removed resources for missing resources can be ignored. 
- */ - 'removed_resources': (string)[]; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/Listener.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/Listener.ts deleted file mode 100644 index 8aed6564d..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/Listener.ts +++ /dev/null @@ -1,504 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/listener.proto - -import type { Address as _envoy_api_v2_core_Address, Address__Output as _envoy_api_v2_core_Address__Output } from '../../../envoy/api/v2/core/Address'; -import type { FilterChain as _envoy_api_v2_listener_FilterChain, FilterChain__Output as _envoy_api_v2_listener_FilterChain__Output } from '../../../envoy/api/v2/listener/FilterChain'; -import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../google/protobuf/BoolValue'; -import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../google/protobuf/UInt32Value'; -import type { Metadata as _envoy_api_v2_core_Metadata, Metadata__Output as _envoy_api_v2_core_Metadata__Output } from '../../../envoy/api/v2/core/Metadata'; -import type { ListenerFilter as _envoy_api_v2_listener_ListenerFilter, ListenerFilter__Output as _envoy_api_v2_listener_ListenerFilter__Output } from '../../../envoy/api/v2/listener/ListenerFilter'; -import type { SocketOption as _envoy_api_v2_core_SocketOption, SocketOption__Output as _envoy_api_v2_core_SocketOption__Output } from '../../../envoy/api/v2/core/SocketOption'; -import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../google/protobuf/Duration'; -import type { TrafficDirection as _envoy_api_v2_core_TrafficDirection } from '../../../envoy/api/v2/core/TrafficDirection'; -import type { UdpListenerConfig as _envoy_api_v2_listener_UdpListenerConfig, UdpListenerConfig__Output as _envoy_api_v2_listener_UdpListenerConfig__Output } from '../../../envoy/api/v2/listener/UdpListenerConfig'; -import type { ApiListener as _envoy_config_listener_v2_ApiListener, ApiListener__Output as _envoy_config_listener_v2_ApiListener__Output } from '../../../envoy/config/listener/v2/ApiListener'; -import type { AccessLog as _envoy_config_filter_accesslog_v2_AccessLog, AccessLog__Output as _envoy_config_filter_accesslog_v2_AccessLog__Output } from '../../../envoy/config/filter/accesslog/v2/AccessLog'; - -/** - * Configuration for listener connection balancing. - */ -export interface _envoy_api_v2_Listener_ConnectionBalanceConfig { - /** - * If specified, the listener will use the exact connection balancer. - */ - 'exact_balance'?: (_envoy_api_v2_Listener_ConnectionBalanceConfig_ExactBalance); - 'balance_type'?: "exact_balance"; -} - -/** - * Configuration for listener connection balancing. - */ -export interface _envoy_api_v2_Listener_ConnectionBalanceConfig__Output { - /** - * If specified, the listener will use the exact connection balancer. - */ - 'exact_balance'?: (_envoy_api_v2_Listener_ConnectionBalanceConfig_ExactBalance__Output); - 'balance_type': "exact_balance"; -} - -/** - * [#not-implemented-hide:] - */ -export interface _envoy_api_v2_Listener_DeprecatedV1 { - /** - * Whether the listener should bind to the port. A listener that doesn't - * bind can only receive connections redirected from other listeners that - * set use_original_dst parameter to true. Default is true. 
- * - * This is deprecated in v2, all Listeners will bind to their port. An - * additional filter chain must be created for every original destination - * port this listener may redirect to in v2, with the original port - * specified in the FilterChainMatch destination_port field. - * - * [#comment:TODO(PiotrSikora): Remove this once verified that we no longer need it.] - */ - 'bind_to_port'?: (_google_protobuf_BoolValue); -} - -/** - * [#not-implemented-hide:] - */ -export interface _envoy_api_v2_Listener_DeprecatedV1__Output { - /** - * Whether the listener should bind to the port. A listener that doesn't - * bind can only receive connections redirected from other listeners that - * set use_original_dst parameter to true. Default is true. - * - * This is deprecated in v2, all Listeners will bind to their port. An - * additional filter chain must be created for every original destination - * port this listener may redirect to in v2, with the original port - * specified in the FilterChainMatch destination_port field. - * - * [#comment:TODO(PiotrSikora): Remove this once verified that we no longer need it.] - */ - 'bind_to_port'?: (_google_protobuf_BoolValue__Output); -} - -// Original file: deps/envoy-api/envoy/api/v2/listener.proto - -export enum _envoy_api_v2_Listener_DrainType { - /** - * Drain in response to calling /healthcheck/fail admin endpoint (along with the health check - * filter), listener removal/modification, and hot restart. - */ - DEFAULT = 0, - /** - * Drain in response to listener removal/modification and hot restart. This setting does not - * include /healthcheck/fail. This setting may be desirable if Envoy is hosting both ingress - * and egress listeners. - */ - MODIFY_ONLY = 1, -} - -/** - * A connection balancer implementation that does exact balancing. This means that a lock is - * held during balancing so that connection counts are nearly exactly balanced between worker - * threads. This is "nearly" exact in the sense that a connection might close in parallel thus - * making the counts incorrect, but this should be rectified on the next accept. This balancer - * sacrifices accept throughput for accuracy and should be used when there are a small number of - * connections that rarely cycle (e.g., service mesh gRPC egress). - */ -export interface _envoy_api_v2_Listener_ConnectionBalanceConfig_ExactBalance { -} - -/** - * A connection balancer implementation that does exact balancing. This means that a lock is - * held during balancing so that connection counts are nearly exactly balanced between worker - * threads. This is "nearly" exact in the sense that a connection might close in parallel thus - * making the counts incorrect, but this should be rectified on the next accept. This balancer - * sacrifices accept throughput for accuracy and should be used when there are a small number of - * connections that rarely cycle (e.g., service mesh gRPC egress). - */ -export interface _envoy_api_v2_Listener_ConnectionBalanceConfig_ExactBalance__Output { -} - -/** - * [#next-free-field: 23] - */ -export interface Listener { - /** - * The unique name by which this listener is known. If no name is provided, - * Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically - * updated or removed via :ref:`LDS ` a unique name must be provided. - */ - 'name'?: (string); - /** - * The address that the listener should listen on. In general, the address must be unique, though - * that is governed by the bind rules of the OS. 
E.g., multiple listeners can listen on port 0 on - * Linux as the actual port will be allocated by the OS. - */ - 'address'?: (_envoy_api_v2_core_Address); - /** - * A list of filter chains to consider for this listener. The - * :ref:`FilterChain ` with the most specific - * :ref:`FilterChainMatch ` criteria is used on a - * connection. - * - * Example using SNI for filter chain selection can be found in the - * :ref:`FAQ entry `. - */ - 'filter_chains'?: (_envoy_api_v2_listener_FilterChain)[]; - /** - * If a connection is redirected using *iptables*, the port on which the proxy - * receives it might be different from the original destination address. When this flag is set to - * true, the listener hands off redirected connections to the listener associated with the - * original destination address. If there is no listener associated with the original destination - * address, the connection is handled by the listener that receives it. Defaults to false. - * - * .. attention:: - * - * This field is deprecated. Use :ref:`an original_dst ` - * :ref:`listener filter ` instead. - * - * Note that hand off to another listener is *NOT* performed without this flag. Once - * :ref:`FilterChainMatch ` is implemented this flag - * will be removed, as filter chain matching can be used to select a filter chain based on the - * restored destination address. - */ - 'use_original_dst'?: (_google_protobuf_BoolValue); - /** - * Soft limit on size of the listener’s new connection read and write buffers. - * If unspecified, an implementation defined default is applied (1MiB). - */ - 'per_connection_buffer_limit_bytes'?: (_google_protobuf_UInt32Value); - /** - * Listener metadata. - */ - 'metadata'?: (_envoy_api_v2_core_Metadata); - /** - * [#not-implemented-hide:] - */ - 'deprecated_v1'?: (_envoy_api_v2_Listener_DeprecatedV1); - /** - * The type of draining to perform at a listener-wide level. - */ - 'drain_type'?: (_envoy_api_v2_Listener_DrainType | keyof typeof _envoy_api_v2_Listener_DrainType); - /** - * Listener filters have the opportunity to manipulate and augment the connection metadata that - * is used in connection filter chain matching, for example. These filters are run before any in - * :ref:`filter_chains `. Order matters as the - * filters are processed sequentially right after a socket has been accepted by the listener, and - * before a connection is created. - * UDP Listener filters can be specified when the protocol in the listener socket address in - * :ref:`protocol ` is :ref:`UDP - * `. - * UDP listeners currently support a single filter. - */ - 'listener_filters'?: (_envoy_api_v2_listener_ListenerFilter)[]; - /** - * Whether the listener should be set as a transparent socket. - * When this flag is set to true, connections can be redirected to the listener using an - * *iptables* *TPROXY* target, in which case the original source and destination addresses and - * ports are preserved on accepted connections. This flag should be used in combination with - * :ref:`an original_dst ` :ref:`listener filter - * ` to mark the connections' local addresses as - * "restored." This can be used to hand off each redirected connection to another listener - * associated with the connection's destination address. Direct connections to the socket without - * using *TPROXY* cannot be distinguished from connections redirected using *TPROXY* and are - * therefore treated as if they were redirected. - * When this flag is set to false, the listener's socket is explicitly reset as non-transparent. 
- * Setting this flag requires Envoy to run with the *CAP_NET_ADMIN* capability. - * When this flag is not set (default), the socket is not modified, i.e. the transparent option - * is neither set nor reset. - */ - 'transparent'?: (_google_protobuf_BoolValue); - /** - * Whether the listener should set the *IP_FREEBIND* socket option. When this - * flag is set to true, listeners can be bound to an IP address that is not - * configured on the system running Envoy. When this flag is set to false, the - * option *IP_FREEBIND* is disabled on the socket. When this flag is not set - * (default), the socket is not modified, i.e. the option is neither enabled - * nor disabled. - */ - 'freebind'?: (_google_protobuf_BoolValue); - /** - * Whether the listener should accept TCP Fast Open (TFO) connections. - * When this flag is set to a value greater than 0, the option TCP_FASTOPEN is enabled on - * the socket, with a queue length of the specified size - * (see `details in RFC7413 `_). - * When this flag is set to 0, the option TCP_FASTOPEN is disabled on the socket. - * When this flag is not set (default), the socket is not modified, - * i.e. the option is neither enabled nor disabled. - * - * On Linux, the net.ipv4.tcp_fastopen kernel parameter must include flag 0x2 to enable - * TCP_FASTOPEN. - * See `ip-sysctl.txt `_. - * - * On macOS, only values of 0, 1, and unset are valid; other values may result in an error. - * To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter. - */ - 'tcp_fast_open_queue_length'?: (_google_protobuf_UInt32Value); - /** - * Additional socket options that may not be present in Envoy source code or - * precompiled binaries. - */ - 'socket_options'?: (_envoy_api_v2_core_SocketOption)[]; - /** - * The timeout to wait for all listener filters to complete operation. If the timeout is reached, - * the accepted socket is closed without a connection being created unless - * `continue_on_listener_filters_timeout` is set to true. Specify 0 to disable the - * timeout. If not specified, a default timeout of 15s is used. - */ - 'listener_filters_timeout'?: (_google_protobuf_Duration); - /** - * Specifies the intended direction of the traffic relative to the local Envoy. - */ - 'traffic_direction'?: (_envoy_api_v2_core_TrafficDirection | keyof typeof _envoy_api_v2_core_TrafficDirection); - /** - * Whether a connection should be created when listener filters timeout. Default is false. - * - * .. attention:: - * - * Some listener filters, such as :ref:`Proxy Protocol filter - * `, should not be used with this option. It will cause - * unexpected behavior when a connection is created. - */ - 'continue_on_listener_filters_timeout'?: (boolean); - /** - * If the protocol in the listener socket address in :ref:`protocol - * ` is :ref:`UDP - * `, this field specifies the actual udp - * listener to create, i.e. :ref:`udp_listener_name - * ` = "raw_udp_listener" for - * creating a packet-oriented UDP listener. If not present, treat it as "raw_udp_listener". - */ - 'udp_listener_config'?: (_envoy_api_v2_listener_UdpListenerConfig); - /** - * Used to represent an API listener, which is used in non-proxy clients. The type of API - * exposed to the non-proxy application depends on the type of API listener. - * When this field is set, no other field except for :ref:`name` - * should be set. - * - * .. note:: - * - * Currently only one ApiListener can be installed; and it can only be done via bootstrap config, - * not LDS. 
- * - * [#next-major-version: In the v3 API, instead of this messy approach where the socket - * listener fields are directly in the top-level Listener message and the API listener types - * are in the ApiListener message, the socket listener messages should be in their own message, - * and the top-level Listener should essentially be a oneof that selects between the - * socket listener and the various types of API listener. That way, a given Listener message - * can structurally only contain the fields of the relevant type.] - */ - 'api_listener'?: (_envoy_config_listener_v2_ApiListener); - /** - * The listener's connection balancer configuration, currently only applicable to TCP listeners. - * If no configuration is specified, Envoy will not attempt to balance active connections between - * worker threads. - */ - 'connection_balance_config'?: (_envoy_api_v2_Listener_ConnectionBalanceConfig); - /** - * When this flag is set to true, listeners set the *SO_REUSEPORT* socket option and - * create one socket for each worker thread. This makes inbound connections - * distribute among worker threads roughly evenly in cases where there are a high number - * of connections. When this flag is set to false, all worker threads share one socket. - * - * Before Linux v4.19-rc1, new TCP connections may be rejected during hot restart - * (see `3rd paragraph in 'soreuseport' commit message - * `_). - * This issue was fixed by `tcp: Avoid TCP syncookie rejected by SO_REUSEPORT socket - * `_. - */ - 'reuse_port'?: (boolean); - /** - * Configuration for :ref:`access logs ` - * emitted by this listener. - */ - 'access_log'?: (_envoy_config_filter_accesslog_v2_AccessLog)[]; -} - -/** - * [#next-free-field: 23] - */ -export interface Listener__Output { - /** - * The unique name by which this listener is known. If no name is provided, - * Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically - * updated or removed via :ref:`LDS ` a unique name must be provided. - */ - 'name': (string); - /** - * The address that the listener should listen on. In general, the address must be unique, though - * that is governed by the bind rules of the OS. E.g., multiple listeners can listen on port 0 on - * Linux as the actual port will be allocated by the OS. - */ - 'address'?: (_envoy_api_v2_core_Address__Output); - /** - * A list of filter chains to consider for this listener. The - * :ref:`FilterChain ` with the most specific - * :ref:`FilterChainMatch ` criteria is used on a - * connection. - * - * Example using SNI for filter chain selection can be found in the - * :ref:`FAQ entry `. - */ - 'filter_chains': (_envoy_api_v2_listener_FilterChain__Output)[]; - /** - * If a connection is redirected using *iptables*, the port on which the proxy - * receives it might be different from the original destination address. When this flag is set to - * true, the listener hands off redirected connections to the listener associated with the - * original destination address. If there is no listener associated with the original destination - * address, the connection is handled by the listener that receives it. Defaults to false. - * - * .. attention:: - * - * This field is deprecated. Use :ref:`an original_dst ` - * :ref:`listener filter ` instead. - * - * Note that hand off to another listener is *NOT* performed without this flag. 
Once - * :ref:`FilterChainMatch ` is implemented this flag - * will be removed, as filter chain matching can be used to select a filter chain based on the - * restored destination address. - */ - 'use_original_dst'?: (_google_protobuf_BoolValue__Output); - /** - * Soft limit on size of the listener’s new connection read and write buffers. - * If unspecified, an implementation defined default is applied (1MiB). - */ - 'per_connection_buffer_limit_bytes'?: (_google_protobuf_UInt32Value__Output); - /** - * Listener metadata. - */ - 'metadata'?: (_envoy_api_v2_core_Metadata__Output); - /** - * [#not-implemented-hide:] - */ - 'deprecated_v1'?: (_envoy_api_v2_Listener_DeprecatedV1__Output); - /** - * The type of draining to perform at a listener-wide level. - */ - 'drain_type': (keyof typeof _envoy_api_v2_Listener_DrainType); - /** - * Listener filters have the opportunity to manipulate and augment the connection metadata that - * is used in connection filter chain matching, for example. These filters are run before any in - * :ref:`filter_chains `. Order matters as the - * filters are processed sequentially right after a socket has been accepted by the listener, and - * before a connection is created. - * UDP Listener filters can be specified when the protocol in the listener socket address in - * :ref:`protocol ` is :ref:`UDP - * `. - * UDP listeners currently support a single filter. - */ - 'listener_filters': (_envoy_api_v2_listener_ListenerFilter__Output)[]; - /** - * Whether the listener should be set as a transparent socket. - * When this flag is set to true, connections can be redirected to the listener using an - * *iptables* *TPROXY* target, in which case the original source and destination addresses and - * ports are preserved on accepted connections. This flag should be used in combination with - * :ref:`an original_dst ` :ref:`listener filter - * ` to mark the connections' local addresses as - * "restored." This can be used to hand off each redirected connection to another listener - * associated with the connection's destination address. Direct connections to the socket without - * using *TPROXY* cannot be distinguished from connections redirected using *TPROXY* and are - * therefore treated as if they were redirected. - * When this flag is set to false, the listener's socket is explicitly reset as non-transparent. - * Setting this flag requires Envoy to run with the *CAP_NET_ADMIN* capability. - * When this flag is not set (default), the socket is not modified, i.e. the transparent option - * is neither set nor reset. - */ - 'transparent'?: (_google_protobuf_BoolValue__Output); - /** - * Whether the listener should set the *IP_FREEBIND* socket option. When this - * flag is set to true, listeners can be bound to an IP address that is not - * configured on the system running Envoy. When this flag is set to false, the - * option *IP_FREEBIND* is disabled on the socket. When this flag is not set - * (default), the socket is not modified, i.e. the option is neither enabled - * nor disabled. - */ - 'freebind'?: (_google_protobuf_BoolValue__Output); - /** - * Whether the listener should accept TCP Fast Open (TFO) connections. - * When this flag is set to a value greater than 0, the option TCP_FASTOPEN is enabled on - * the socket, with a queue length of the specified size - * (see `details in RFC7413 `_). - * When this flag is set to 0, the option TCP_FASTOPEN is disabled on the socket. - * When this flag is not set (default), the socket is not modified, - * i.e. 
the option is neither enabled nor disabled. - * - * On Linux, the net.ipv4.tcp_fastopen kernel parameter must include flag 0x2 to enable - * TCP_FASTOPEN. - * See `ip-sysctl.txt `_. - * - * On macOS, only values of 0, 1, and unset are valid; other values may result in an error. - * To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter. - */ - 'tcp_fast_open_queue_length'?: (_google_protobuf_UInt32Value__Output); - /** - * Additional socket options that may not be present in Envoy source code or - * precompiled binaries. - */ - 'socket_options': (_envoy_api_v2_core_SocketOption__Output)[]; - /** - * The timeout to wait for all listener filters to complete operation. If the timeout is reached, - * the accepted socket is closed without a connection being created unless - * `continue_on_listener_filters_timeout` is set to true. Specify 0 to disable the - * timeout. If not specified, a default timeout of 15s is used. - */ - 'listener_filters_timeout'?: (_google_protobuf_Duration__Output); - /** - * Specifies the intended direction of the traffic relative to the local Envoy. - */ - 'traffic_direction': (keyof typeof _envoy_api_v2_core_TrafficDirection); - /** - * Whether a connection should be created when listener filters timeout. Default is false. - * - * .. attention:: - * - * Some listener filters, such as :ref:`Proxy Protocol filter - * `, should not be used with this option. It will cause - * unexpected behavior when a connection is created. - */ - 'continue_on_listener_filters_timeout': (boolean); - /** - * If the protocol in the listener socket address in :ref:`protocol - * ` is :ref:`UDP - * `, this field specifies the actual udp - * listener to create, i.e. :ref:`udp_listener_name - * ` = "raw_udp_listener" for - * creating a packet-oriented UDP listener. If not present, treat it as "raw_udp_listener". - */ - 'udp_listener_config'?: (_envoy_api_v2_listener_UdpListenerConfig__Output); - /** - * Used to represent an API listener, which is used in non-proxy clients. The type of API - * exposed to the non-proxy application depends on the type of API listener. - * When this field is set, no other field except for :ref:`name` - * should be set. - * - * .. note:: - * - * Currently only one ApiListener can be installed; and it can only be done via bootstrap config, - * not LDS. - * - * [#next-major-version: In the v3 API, instead of this messy approach where the socket - * listener fields are directly in the top-level Listener message and the API listener types - * are in the ApiListener message, the socket listener messages should be in their own message, - * and the top-level Listener should essentially be a oneof that selects between the - * socket listener and the various types of API listener. That way, a given Listener message - * can structurally only contain the fields of the relevant type.] - */ - 'api_listener'?: (_envoy_config_listener_v2_ApiListener__Output); - /** - * The listener's connection balancer configuration, currently only applicable to TCP listeners. - * If no configuration is specified, Envoy will not attempt to balance active connections between - * worker threads. - */ - 'connection_balance_config'?: (_envoy_api_v2_Listener_ConnectionBalanceConfig__Output); - /** - * When this flag is set to true, listeners set the *SO_REUSEPORT* socket option and - * create one socket for each worker thread. This makes inbound connections - * distribute among worker threads roughly evenly in cases where there are a high number - * of connections. 
When this flag is set to false, all worker threads share one socket. - * - * Before Linux v4.19-rc1, new TCP connections may be rejected during hot restart - * (see `3rd paragraph in 'soreuseport' commit message - * `_). - * This issue was fixed by `tcp: Avoid TCP syncookie rejected by SO_REUSEPORT socket - * `_. - */ - 'reuse_port': (boolean); - /** - * Configuration for :ref:`access logs ` - * emitted by this listener. - */ - 'access_log': (_envoy_config_filter_accesslog_v2_AccessLog__Output)[]; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/Resource.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/Resource.ts deleted file mode 100644 index 4804201bd..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/Resource.ts +++ /dev/null @@ -1,43 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/discovery.proto - -import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../google/protobuf/Any'; - -export interface Resource { - /** - * The resource level version. It allows xDS to track the state of individual - * resources. - */ - 'version'?: (string); - /** - * The resource being tracked. - */ - 'resource'?: (_google_protobuf_Any); - /** - * The resource's name, to distinguish it from others of the same type of resource. - */ - 'name'?: (string); - /** - * The aliases are a list of other names that this resource can go by. - */ - 'aliases'?: (string)[]; -} - -export interface Resource__Output { - /** - * The resource level version. It allows xDS to track the state of individual - * resources. - */ - 'version': (string); - /** - * The resource being tracked. - */ - 'resource'?: (_google_protobuf_Any__Output); - /** - * The resource's name, to distinguish it from others of the same type of resource. - */ - 'name': (string); - /** - * The aliases are a list of other names that this resource can go by. - */ - 'aliases': (string)[]; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/RouteConfiguration.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/RouteConfiguration.ts deleted file mode 100644 index de0634c24..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/RouteConfiguration.ts +++ /dev/null @@ -1,186 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/route.proto - -import type { VirtualHost as _envoy_api_v2_route_VirtualHost, VirtualHost__Output as _envoy_api_v2_route_VirtualHost__Output } from '../../../envoy/api/v2/route/VirtualHost'; -import type { HeaderValueOption as _envoy_api_v2_core_HeaderValueOption, HeaderValueOption__Output as _envoy_api_v2_core_HeaderValueOption__Output } from '../../../envoy/api/v2/core/HeaderValueOption'; -import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../google/protobuf/BoolValue'; -import type { Vhds as _envoy_api_v2_Vhds, Vhds__Output as _envoy_api_v2_Vhds__Output } from '../../../envoy/api/v2/Vhds'; - -/** - * [#next-free-field: 11] - */ -export interface RouteConfiguration { - /** - * The name of the route configuration. For example, it might match - * :ref:`route_config_name - * ` in - * :ref:`envoy_api_msg_config.filter.network.http_connection_manager.v2.Rds`. - */ - 'name'?: (string); - /** - * An array of virtual hosts that make up the route table. - */ - 'virtual_hosts'?: (_envoy_api_v2_route_VirtualHost)[]; - /** - * Optionally specifies a list of HTTP headers that the connection manager - * will consider to be internal only. 
If they are found on external requests they will be cleaned - * prior to filter invocation. See :ref:`config_http_conn_man_headers_x-envoy-internal` for more - * information. - */ - 'internal_only_headers'?: (string)[]; - /** - * Specifies a list of HTTP headers that should be added to each response that - * the connection manager encodes. Headers specified at this level are applied - * after headers from any enclosed :ref:`envoy_api_msg_route.VirtualHost` or - * :ref:`envoy_api_msg_route.RouteAction`. For more information, including details on - * header value syntax, see the documentation on :ref:`custom request headers - * `. - */ - 'response_headers_to_add'?: (_envoy_api_v2_core_HeaderValueOption)[]; - /** - * Specifies a list of HTTP headers that should be removed from each response - * that the connection manager encodes. - */ - 'response_headers_to_remove'?: (string)[]; - /** - * Specifies a list of HTTP headers that should be added to each request - * routed by the HTTP connection manager. Headers specified at this level are - * applied after headers from any enclosed :ref:`envoy_api_msg_route.VirtualHost` or - * :ref:`envoy_api_msg_route.RouteAction`. For more information, including details on - * header value syntax, see the documentation on :ref:`custom request headers - * `. - */ - 'request_headers_to_add'?: (_envoy_api_v2_core_HeaderValueOption)[]; - /** - * An optional boolean that specifies whether the clusters that the route - * table refers to will be validated by the cluster manager. If set to true - * and a route refers to a non-existent cluster, the route table will not - * load. If set to false and a route refers to a non-existent cluster, the - * route table will load and the router filter will return a 404 if the route - * is selected at runtime. This setting defaults to true if the route table - * is statically defined via the :ref:`route_config - * ` - * option. This setting default to false if the route table is loaded dynamically via the - * :ref:`rds - * ` - * option. Users may wish to override the default behavior in certain cases (for example when - * using CDS with a static route table). - */ - 'validate_clusters'?: (_google_protobuf_BoolValue); - /** - * Specifies a list of HTTP headers that should be removed from each request - * routed by the HTTP connection manager. - */ - 'request_headers_to_remove'?: (string)[]; - /** - * An array of virtual hosts will be dynamically loaded via the VHDS API. - * Both *virtual_hosts* and *vhds* fields will be used when present. *virtual_hosts* can be used - * for a base routing table or for infrequently changing virtual hosts. *vhds* is used for - * on-demand discovery of virtual hosts. The contents of these two fields will be merged to - * generate a routing table for a given RouteConfiguration, with *vhds* derived configuration - * taking precedence. - */ - 'vhds'?: (_envoy_api_v2_Vhds); - /** - * By default, headers that should be added/removed are evaluated from most to least specific: - * - * * route level - * * virtual host level - * * connection manager level - * - * To allow setting overrides at the route or virtual host level, this order can be reversed - * by setting this option to true. Defaults to false. - * - * [#next-major-version: In the v3 API, this will default to true.] - */ - 'most_specific_header_mutations_wins'?: (boolean); -} - -/** - * [#next-free-field: 11] - */ -export interface RouteConfiguration__Output { - /** - * The name of the route configuration. 
For example, it might match - * :ref:`route_config_name - * ` in - * :ref:`envoy_api_msg_config.filter.network.http_connection_manager.v2.Rds`. - */ - 'name': (string); - /** - * An array of virtual hosts that make up the route table. - */ - 'virtual_hosts': (_envoy_api_v2_route_VirtualHost__Output)[]; - /** - * Optionally specifies a list of HTTP headers that the connection manager - * will consider to be internal only. If they are found on external requests they will be cleaned - * prior to filter invocation. See :ref:`config_http_conn_man_headers_x-envoy-internal` for more - * information. - */ - 'internal_only_headers': (string)[]; - /** - * Specifies a list of HTTP headers that should be added to each response that - * the connection manager encodes. Headers specified at this level are applied - * after headers from any enclosed :ref:`envoy_api_msg_route.VirtualHost` or - * :ref:`envoy_api_msg_route.RouteAction`. For more information, including details on - * header value syntax, see the documentation on :ref:`custom request headers - * `. - */ - 'response_headers_to_add': (_envoy_api_v2_core_HeaderValueOption__Output)[]; - /** - * Specifies a list of HTTP headers that should be removed from each response - * that the connection manager encodes. - */ - 'response_headers_to_remove': (string)[]; - /** - * Specifies a list of HTTP headers that should be added to each request - * routed by the HTTP connection manager. Headers specified at this level are - * applied after headers from any enclosed :ref:`envoy_api_msg_route.VirtualHost` or - * :ref:`envoy_api_msg_route.RouteAction`. For more information, including details on - * header value syntax, see the documentation on :ref:`custom request headers - * `. - */ - 'request_headers_to_add': (_envoy_api_v2_core_HeaderValueOption__Output)[]; - /** - * An optional boolean that specifies whether the clusters that the route - * table refers to will be validated by the cluster manager. If set to true - * and a route refers to a non-existent cluster, the route table will not - * load. If set to false and a route refers to a non-existent cluster, the - * route table will load and the router filter will return a 404 if the route - * is selected at runtime. This setting defaults to true if the route table - * is statically defined via the :ref:`route_config - * ` - * option. This setting default to false if the route table is loaded dynamically via the - * :ref:`rds - * ` - * option. Users may wish to override the default behavior in certain cases (for example when - * using CDS with a static route table). - */ - 'validate_clusters'?: (_google_protobuf_BoolValue__Output); - /** - * Specifies a list of HTTP headers that should be removed from each request - * routed by the HTTP connection manager. - */ - 'request_headers_to_remove': (string)[]; - /** - * An array of virtual hosts will be dynamically loaded via the VHDS API. - * Both *virtual_hosts* and *vhds* fields will be used when present. *virtual_hosts* can be used - * for a base routing table or for infrequently changing virtual hosts. *vhds* is used for - * on-demand discovery of virtual hosts. The contents of these two fields will be merged to - * generate a routing table for a given RouteConfiguration, with *vhds* derived configuration - * taking precedence. 
- */ - 'vhds'?: (_envoy_api_v2_Vhds__Output); - /** - * By default, headers that should be added/removed are evaluated from most to least specific: - * - * * route level - * * virtual host level - * * connection manager level - * - * To allow setting overrides at the route or virtual host level, this order can be reversed - * by setting this option to true. Defaults to false. - * - * [#next-major-version: In the v3 API, this will default to true.] - */ - 'most_specific_header_mutations_wins': (boolean); -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/ScopedRouteConfiguration.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/ScopedRouteConfiguration.ts deleted file mode 100644 index 02810bf02..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/ScopedRouteConfiguration.ts +++ /dev/null @@ -1,204 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/scoped_route.proto - - -export interface _envoy_api_v2_ScopedRouteConfiguration_Key_Fragment { - /** - * A string to match against. - */ - 'string_key'?: (string); - 'type'?: "string_key"; -} - -export interface _envoy_api_v2_ScopedRouteConfiguration_Key_Fragment__Output { - /** - * A string to match against. - */ - 'string_key'?: (string); - 'type': "string_key"; -} - -/** - * Specifies a key which is matched against the output of the - * :ref:`scope_key_builder` - * specified in the HttpConnectionManager. The matching is done per HTTP - * request and is dependent on the order of the fragments contained in the - * Key. - */ -export interface _envoy_api_v2_ScopedRouteConfiguration_Key { - /** - * The ordered set of fragments to match against. The order must match the - * fragments in the corresponding - * :ref:`scope_key_builder`. - */ - 'fragments'?: (_envoy_api_v2_ScopedRouteConfiguration_Key_Fragment)[]; -} - -/** - * Specifies a key which is matched against the output of the - * :ref:`scope_key_builder` - * specified in the HttpConnectionManager. The matching is done per HTTP - * request and is dependent on the order of the fragments contained in the - * Key. - */ -export interface _envoy_api_v2_ScopedRouteConfiguration_Key__Output { - /** - * The ordered set of fragments to match against. The order must match the - * fragments in the corresponding - * :ref:`scope_key_builder`. - */ - 'fragments': (_envoy_api_v2_ScopedRouteConfiguration_Key_Fragment__Output)[]; -} - -/** - * Specifies a routing scope, which associates a - * :ref:`Key` to a - * :ref:`envoy_api_msg_RouteConfiguration` (identified by its resource name). - * - * The HTTP connection manager builds up a table consisting of these Key to - * RouteConfiguration mappings, and looks up the RouteConfiguration to use per - * request according to the algorithm specified in the - * :ref:`scope_key_builder` - * assigned to the HttpConnectionManager. - * - * For example, with the following configurations (in YAML): - * - * HttpConnectionManager config: - * - * .. code:: - * - * ... - * scoped_routes: - * name: foo-scoped-routes - * scope_key_builder: - * fragments: - * - header_value_extractor: - * name: X-Route-Selector - * element_separator: , - * element: - * separator: = - * key: vip - * - * ScopedRouteConfiguration resources (specified statically via - * :ref:`scoped_route_configurations_list` - * or obtained dynamically via SRDS): - * - * .. 
code:: - * - * (1) - * name: route-scope1 - * route_configuration_name: route-config1 - * key: - * fragments: - * - string_key: 172.10.10.20 - * - * (2) - * name: route-scope2 - * route_configuration_name: route-config2 - * key: - * fragments: - * - string_key: 172.20.20.30 - * - * A request from a client such as: - * - * .. code:: - * - * GET / HTTP/1.1 - * Host: foo.com - * X-Route-Selector: vip=172.10.10.20 - * - * would result in the routing table defined by the `route-config1` - * RouteConfiguration being assigned to the HTTP request/stream. - */ -export interface ScopedRouteConfiguration { - /** - * The name assigned to the routing scope. - */ - 'name'?: (string); - /** - * The resource name to use for a :ref:`envoy_api_msg_DiscoveryRequest` to an - * RDS server to fetch the :ref:`envoy_api_msg_RouteConfiguration` associated - * with this scope. - */ - 'route_configuration_name'?: (string); - /** - * The key to match against. - */ - 'key'?: (_envoy_api_v2_ScopedRouteConfiguration_Key); -} - -/** - * Specifies a routing scope, which associates a - * :ref:`Key` to a - * :ref:`envoy_api_msg_RouteConfiguration` (identified by its resource name). - * - * The HTTP connection manager builds up a table consisting of these Key to - * RouteConfiguration mappings, and looks up the RouteConfiguration to use per - * request according to the algorithm specified in the - * :ref:`scope_key_builder` - * assigned to the HttpConnectionManager. - * - * For example, with the following configurations (in YAML): - * - * HttpConnectionManager config: - * - * .. code:: - * - * ... - * scoped_routes: - * name: foo-scoped-routes - * scope_key_builder: - * fragments: - * - header_value_extractor: - * name: X-Route-Selector - * element_separator: , - * element: - * separator: = - * key: vip - * - * ScopedRouteConfiguration resources (specified statically via - * :ref:`scoped_route_configurations_list` - * or obtained dynamically via SRDS): - * - * .. code:: - * - * (1) - * name: route-scope1 - * route_configuration_name: route-config1 - * key: - * fragments: - * - string_key: 172.10.10.20 - * - * (2) - * name: route-scope2 - * route_configuration_name: route-config2 - * key: - * fragments: - * - string_key: 172.20.20.30 - * - * A request from a client such as: - * - * .. code:: - * - * GET / HTTP/1.1 - * Host: foo.com - * X-Route-Selector: vip=172.10.10.20 - * - * would result in the routing table defined by the `route-config1` - * RouteConfiguration being assigned to the HTTP request/stream. - */ -export interface ScopedRouteConfiguration__Output { - /** - * The name assigned to the routing scope. - */ - 'name': (string); - /** - * The resource name to use for a :ref:`envoy_api_msg_DiscoveryRequest` to an - * RDS server to fetch the :ref:`envoy_api_msg_RouteConfiguration` associated - * with this scope. - */ - 'route_configuration_name': (string); - /** - * The key to match against. 
- */ - 'key'?: (_envoy_api_v2_ScopedRouteConfiguration_Key__Output); -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/UpstreamBindConfig.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/UpstreamBindConfig.ts deleted file mode 100644 index 966ccca85..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/UpstreamBindConfig.ts +++ /dev/null @@ -1,25 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/cluster.proto - -import type { Address as _envoy_api_v2_core_Address, Address__Output as _envoy_api_v2_core_Address__Output } from '../../../envoy/api/v2/core/Address'; - -/** - * An extensible structure containing the address Envoy should bind to when - * establishing upstream connections. - */ -export interface UpstreamBindConfig { - /** - * The address Envoy should bind to when establishing upstream connections. - */ - 'source_address'?: (_envoy_api_v2_core_Address); -} - -/** - * An extensible structure containing the address Envoy should bind to when - * establishing upstream connections. - */ -export interface UpstreamBindConfig__Output { - /** - * The address Envoy should bind to when establishing upstream connections. - */ - 'source_address'?: (_envoy_api_v2_core_Address__Output); -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/UpstreamConnectionOptions.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/UpstreamConnectionOptions.ts deleted file mode 100644 index e46a2cd06..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/UpstreamConnectionOptions.ts +++ /dev/null @@ -1,17 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/cluster.proto - -import type { TcpKeepalive as _envoy_api_v2_core_TcpKeepalive, TcpKeepalive__Output as _envoy_api_v2_core_TcpKeepalive__Output } from '../../../envoy/api/v2/core/TcpKeepalive'; - -export interface UpstreamConnectionOptions { - /** - * If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives. - */ - 'tcp_keepalive'?: (_envoy_api_v2_core_TcpKeepalive); -} - -export interface UpstreamConnectionOptions__Output { - /** - * If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives. - */ - 'tcp_keepalive'?: (_envoy_api_v2_core_TcpKeepalive__Output); -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/Vhds.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/Vhds.ts deleted file mode 100644 index f2ec45d2d..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/Vhds.ts +++ /dev/null @@ -1,17 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/route.proto - -import type { ConfigSource as _envoy_api_v2_core_ConfigSource, ConfigSource__Output as _envoy_api_v2_core_ConfigSource__Output } from '../../../envoy/api/v2/core/ConfigSource'; - -export interface Vhds { - /** - * Configuration source specifier for VHDS. - */ - 'config_source'?: (_envoy_api_v2_core_ConfigSource); -} - -export interface Vhds__Output { - /** - * Configuration source specifier for VHDS. 
- */ - 'config_source'?: (_envoy_api_v2_core_ConfigSource__Output); -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/auth/CertificateValidationContext.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/auth/CertificateValidationContext.ts deleted file mode 100644 index 0272f3b5c..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/auth/CertificateValidationContext.ts +++ /dev/null @@ -1,315 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/auth/common.proto - -import type { DataSource as _envoy_api_v2_core_DataSource, DataSource__Output as _envoy_api_v2_core_DataSource__Output } from '../../../../envoy/api/v2/core/DataSource'; -import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../../google/protobuf/BoolValue'; -import type { StringMatcher as _envoy_type_matcher_StringMatcher, StringMatcher__Output as _envoy_type_matcher_StringMatcher__Output } from '../../../../envoy/type/matcher/StringMatcher'; - -// Original file: deps/envoy-api/envoy/api/v2/auth/common.proto - -/** - * Peer certificate verification mode. - */ -export enum _envoy_api_v2_auth_CertificateValidationContext_TrustChainVerification { - /** - * Perform default certificate verification (e.g., against CA / verification lists) - */ - VERIFY_TRUST_CHAIN = 0, - /** - * Connections where the certificate fails verification will be permitted. - * For HTTP connections, the result of certificate verification can be used in route matching. ( - * see :ref:`validated ` ). - */ - ACCEPT_UNTRUSTED = 1, -} - -/** - * [#next-free-field: 11] - */ -export interface CertificateValidationContext { - /** - * TLS certificate data containing certificate authority certificates to use in verifying - * a presented peer certificate (e.g. server certificate for clusters or client certificate - * for listeners). If not specified and a peer certificate is presented it will not be - * verified. By default, a client certificate is optional, unless one of the additional - * options (:ref:`require_client_certificate - * `, - * :ref:`verify_certificate_spki - * `, - * :ref:`verify_certificate_hash - * `, or - * :ref:`match_subject_alt_names - * `) is also - * specified. - * - * It can optionally contain certificate revocation lists, in which case Envoy will verify - * that the presented peer certificate has not been revoked by one of the included CRLs. - * - * See :ref:`the TLS overview ` for a list of common - * system CA locations. - */ - 'trusted_ca'?: (_envoy_api_v2_core_DataSource); - /** - * An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that - * the SHA-256 of the DER-encoded presented certificate matches one of the specified values. - * - * A hex-encoded SHA-256 of the certificate can be generated with the following command: - * - * .. code-block:: bash - * - * $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2 - * df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a - * - * A long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the certificate - * can be generated with the following command: - * - * .. code-block:: bash - * - * $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2 - * DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A - * - * Both of those formats are acceptable. 
- * - * When both: - * :ref:`verify_certificate_hash - * ` and - * :ref:`verify_certificate_spki - * ` are specified, - * a hash matching value from either of the lists will result in the certificate being accepted. - */ - 'verify_certificate_hash'?: (string)[]; - /** - * An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the - * SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate - * matches one of the specified values. - * - * A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate - * can be generated with the following command: - * - * .. code-block:: bash - * - * $ openssl x509 -in path/to/client.crt -noout -pubkey - * | openssl pkey -pubin -outform DER - * | openssl dgst -sha256 -binary - * | openssl enc -base64 - * NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= - * - * This is the format used in HTTP Public Key Pinning. - * - * When both: - * :ref:`verify_certificate_hash - * ` and - * :ref:`verify_certificate_spki - * ` are specified, - * a hash matching value from either of the lists will result in the certificate being accepted. - * - * .. attention:: - * - * This option is preferred over :ref:`verify_certificate_hash - * `, - * because SPKI is tied to a private key, so it doesn't change when the certificate - * is renewed using the same private key. - */ - 'verify_certificate_spki'?: (string)[]; - /** - * An optional list of Subject Alternative Names. If specified, Envoy will verify that the - * Subject Alternative Name of the presented certificate matches one of the specified values. - * - * .. attention:: - * - * Subject Alternative Names are easily spoofable and verifying only them is insecure, - * therefore this option must be used together with :ref:`trusted_ca - * `. - */ - 'verify_subject_alt_name'?: (string)[]; - /** - * [#not-implemented-hide:] Must present a signed time-stamped OCSP response. - */ - 'require_ocsp_staple'?: (_google_protobuf_BoolValue); - /** - * [#not-implemented-hide:] Must present signed certificate time-stamp. - */ - 'require_signed_certificate_timestamp'?: (_google_protobuf_BoolValue); - /** - * An optional `certificate revocation list - * `_ - * (in PEM format). If specified, Envoy will verify that the presented peer - * certificate has not been revoked by this CRL. If this DataSource contains - * multiple CRLs, all of them will be used. - */ - 'crl'?: (_envoy_api_v2_core_DataSource); - /** - * If specified, Envoy will not reject expired certificates. - */ - 'allow_expired_certificate'?: (boolean); - /** - * An optional list of Subject Alternative name matchers. Envoy will verify that the - * Subject Alternative Name of the presented certificate matches one of the specified matches. - * - * When a certificate has wildcard DNS SAN entries, to match a specific client, it should be - * configured with exact match type in the :ref:`string matcher `. - * For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", - * it should be configured as shown below. - * - * .. code-block:: yaml - * - * match_subject_alt_names: - * exact: "api.example.com" - * - * .. attention:: - * - * Subject Alternative Names are easily spoofable and verifying only them is insecure, - * therefore this option must be used together with :ref:`trusted_ca - * `. - */ - 'match_subject_alt_names'?: (_envoy_type_matcher_StringMatcher)[]; - /** - * Certificate trust chain verification mode. 
- */ - 'trust_chain_verification'?: (_envoy_api_v2_auth_CertificateValidationContext_TrustChainVerification | keyof typeof _envoy_api_v2_auth_CertificateValidationContext_TrustChainVerification); -} - -/** - * [#next-free-field: 11] - */ -export interface CertificateValidationContext__Output { - /** - * TLS certificate data containing certificate authority certificates to use in verifying - * a presented peer certificate (e.g. server certificate for clusters or client certificate - * for listeners). If not specified and a peer certificate is presented it will not be - * verified. By default, a client certificate is optional, unless one of the additional - * options (:ref:`require_client_certificate - * `, - * :ref:`verify_certificate_spki - * `, - * :ref:`verify_certificate_hash - * `, or - * :ref:`match_subject_alt_names - * `) is also - * specified. - * - * It can optionally contain certificate revocation lists, in which case Envoy will verify - * that the presented peer certificate has not been revoked by one of the included CRLs. - * - * See :ref:`the TLS overview ` for a list of common - * system CA locations. - */ - 'trusted_ca'?: (_envoy_api_v2_core_DataSource__Output); - /** - * An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that - * the SHA-256 of the DER-encoded presented certificate matches one of the specified values. - * - * A hex-encoded SHA-256 of the certificate can be generated with the following command: - * - * .. code-block:: bash - * - * $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2 - * df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a - * - * A long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the certificate - * can be generated with the following command: - * - * .. code-block:: bash - * - * $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2 - * DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A - * - * Both of those formats are acceptable. - * - * When both: - * :ref:`verify_certificate_hash - * ` and - * :ref:`verify_certificate_spki - * ` are specified, - * a hash matching value from either of the lists will result in the certificate being accepted. - */ - 'verify_certificate_hash': (string)[]; - /** - * An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the - * SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate - * matches one of the specified values. - * - * A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate - * can be generated with the following command: - * - * .. code-block:: bash - * - * $ openssl x509 -in path/to/client.crt -noout -pubkey - * | openssl pkey -pubin -outform DER - * | openssl dgst -sha256 -binary - * | openssl enc -base64 - * NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= - * - * This is the format used in HTTP Public Key Pinning. - * - * When both: - * :ref:`verify_certificate_hash - * ` and - * :ref:`verify_certificate_spki - * ` are specified, - * a hash matching value from either of the lists will result in the certificate being accepted. - * - * .. attention:: - * - * This option is preferred over :ref:`verify_certificate_hash - * `, - * because SPKI is tied to a private key, so it doesn't change when the certificate - * is renewed using the same private key. 
- */ - 'verify_certificate_spki': (string)[]; - /** - * An optional list of Subject Alternative Names. If specified, Envoy will verify that the - * Subject Alternative Name of the presented certificate matches one of the specified values. - * - * .. attention:: - * - * Subject Alternative Names are easily spoofable and verifying only them is insecure, - * therefore this option must be used together with :ref:`trusted_ca - * `. - */ - 'verify_subject_alt_name': (string)[]; - /** - * [#not-implemented-hide:] Must present a signed time-stamped OCSP response. - */ - 'require_ocsp_staple'?: (_google_protobuf_BoolValue__Output); - /** - * [#not-implemented-hide:] Must present signed certificate time-stamp. - */ - 'require_signed_certificate_timestamp'?: (_google_protobuf_BoolValue__Output); - /** - * An optional `certificate revocation list - * `_ - * (in PEM format). If specified, Envoy will verify that the presented peer - * certificate has not been revoked by this CRL. If this DataSource contains - * multiple CRLs, all of them will be used. - */ - 'crl'?: (_envoy_api_v2_core_DataSource__Output); - /** - * If specified, Envoy will not reject expired certificates. - */ - 'allow_expired_certificate': (boolean); - /** - * An optional list of Subject Alternative name matchers. Envoy will verify that the - * Subject Alternative Name of the presented certificate matches one of the specified matches. - * - * When a certificate has wildcard DNS SAN entries, to match a specific client, it should be - * configured with exact match type in the :ref:`string matcher `. - * For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", - * it should be configured as shown below. - * - * .. code-block:: yaml - * - * match_subject_alt_names: - * exact: "api.example.com" - * - * .. attention:: - * - * Subject Alternative Names are easily spoofable and verifying only them is insecure, - * therefore this option must be used together with :ref:`trusted_ca - * `. - */ - 'match_subject_alt_names': (_envoy_type_matcher_StringMatcher__Output)[]; - /** - * Certificate trust chain verification mode. 
- */ - 'trust_chain_verification': (keyof typeof _envoy_api_v2_auth_CertificateValidationContext_TrustChainVerification); -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/auth/CommonTlsContext.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/auth/CommonTlsContext.ts deleted file mode 100644 index 784e3d2ce..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/auth/CommonTlsContext.ts +++ /dev/null @@ -1,140 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/auth/tls.proto - -import type { TlsParameters as _envoy_api_v2_auth_TlsParameters, TlsParameters__Output as _envoy_api_v2_auth_TlsParameters__Output } from '../../../../envoy/api/v2/auth/TlsParameters'; -import type { TlsCertificate as _envoy_api_v2_auth_TlsCertificate, TlsCertificate__Output as _envoy_api_v2_auth_TlsCertificate__Output } from '../../../../envoy/api/v2/auth/TlsCertificate'; -import type { CertificateValidationContext as _envoy_api_v2_auth_CertificateValidationContext, CertificateValidationContext__Output as _envoy_api_v2_auth_CertificateValidationContext__Output } from '../../../../envoy/api/v2/auth/CertificateValidationContext'; -import type { SdsSecretConfig as _envoy_api_v2_auth_SdsSecretConfig, SdsSecretConfig__Output as _envoy_api_v2_auth_SdsSecretConfig__Output } from '../../../../envoy/api/v2/auth/SdsSecretConfig'; - -export interface _envoy_api_v2_auth_CommonTlsContext_CombinedCertificateValidationContext { - /** - * How to validate peer certificates. - */ - 'default_validation_context'?: (_envoy_api_v2_auth_CertificateValidationContext); - /** - * Config for fetching validation context via SDS API. - */ - 'validation_context_sds_secret_config'?: (_envoy_api_v2_auth_SdsSecretConfig); -} - -export interface _envoy_api_v2_auth_CommonTlsContext_CombinedCertificateValidationContext__Output { - /** - * How to validate peer certificates. - */ - 'default_validation_context'?: (_envoy_api_v2_auth_CertificateValidationContext__Output); - /** - * Config for fetching validation context via SDS API. - */ - 'validation_context_sds_secret_config'?: (_envoy_api_v2_auth_SdsSecretConfig__Output); -} - -/** - * TLS context shared by both client and server TLS contexts. - * [#next-free-field: 9] - */ -export interface CommonTlsContext { - /** - * TLS protocol versions, cipher suites etc. - */ - 'tls_params'?: (_envoy_api_v2_auth_TlsParameters); - /** - * :ref:`Multiple TLS certificates ` can be associated with the - * same context to allow both RSA and ECDSA certificates. - * - * Only a single TLS certificate is supported in client contexts. In server contexts, the first - * RSA certificate is used for clients that only support RSA and the first ECDSA certificate is - * used for clients that support ECDSA. - */ - 'tls_certificates'?: (_envoy_api_v2_auth_TlsCertificate)[]; - /** - * How to validate peer certificates. - */ - 'validation_context'?: (_envoy_api_v2_auth_CertificateValidationContext); - /** - * Supplies the list of ALPN protocols that the listener should expose. In - * practice this is likely to be set to one of two values (see the - * :ref:`codec_type - * ` - * parameter in the HTTP connection manager for more information): - * - * * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. - * * "http/1.1" If the listener is only going to support HTTP/1.1. - * - * There is no default for this parameter. If empty, Envoy will not expose ALPN. - */ - 'alpn_protocols'?: (string)[]; - /** - * Configs for fetching TLS certificates via SDS API. 
- */ - 'tls_certificate_sds_secret_configs'?: (_envoy_api_v2_auth_SdsSecretConfig)[]; - /** - * Config for fetching validation context via SDS API. - */ - 'validation_context_sds_secret_config'?: (_envoy_api_v2_auth_SdsSecretConfig); - /** - * Combined certificate validation context holds a default CertificateValidationContext - * and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic - * and default CertificateValidationContext are merged into a new CertificateValidationContext - * for validation. This merge is done by Message::MergeFrom(), so dynamic - * CertificateValidationContext overwrites singular fields in default - * CertificateValidationContext, and concatenates repeated fields to default - * CertificateValidationContext, and logical OR is applied to boolean fields. - */ - 'combined_validation_context'?: (_envoy_api_v2_auth_CommonTlsContext_CombinedCertificateValidationContext); - 'validation_context_type'?: "validation_context"|"validation_context_sds_secret_config"|"combined_validation_context"; -} - -/** - * TLS context shared by both client and server TLS contexts. - * [#next-free-field: 9] - */ -export interface CommonTlsContext__Output { - /** - * TLS protocol versions, cipher suites etc. - */ - 'tls_params'?: (_envoy_api_v2_auth_TlsParameters__Output); - /** - * :ref:`Multiple TLS certificates ` can be associated with the - * same context to allow both RSA and ECDSA certificates. - * - * Only a single TLS certificate is supported in client contexts. In server contexts, the first - * RSA certificate is used for clients that only support RSA and the first ECDSA certificate is - * used for clients that support ECDSA. - */ - 'tls_certificates': (_envoy_api_v2_auth_TlsCertificate__Output)[]; - /** - * How to validate peer certificates. - */ - 'validation_context'?: (_envoy_api_v2_auth_CertificateValidationContext__Output); - /** - * Supplies the list of ALPN protocols that the listener should expose. In - * practice this is likely to be set to one of two values (see the - * :ref:`codec_type - * ` - * parameter in the HTTP connection manager for more information): - * - * * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. - * * "http/1.1" If the listener is only going to support HTTP/1.1. - * - * There is no default for this parameter. If empty, Envoy will not expose ALPN. - */ - 'alpn_protocols': (string)[]; - /** - * Configs for fetching TLS certificates via SDS API. - */ - 'tls_certificate_sds_secret_configs': (_envoy_api_v2_auth_SdsSecretConfig__Output)[]; - /** - * Config for fetching validation context via SDS API. - */ - 'validation_context_sds_secret_config'?: (_envoy_api_v2_auth_SdsSecretConfig__Output); - /** - * Combined certificate validation context holds a default CertificateValidationContext - * and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic - * and default CertificateValidationContext are merged into a new CertificateValidationContext - * for validation. This merge is done by Message::MergeFrom(), so dynamic - * CertificateValidationContext overwrites singular fields in default - * CertificateValidationContext, and concatenates repeated fields to default - * CertificateValidationContext, and logical OR is applied to boolean fields. 
- */ - 'combined_validation_context'?: (_envoy_api_v2_auth_CommonTlsContext_CombinedCertificateValidationContext__Output); - 'validation_context_type': "validation_context"|"validation_context_sds_secret_config"|"combined_validation_context"; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/auth/DownstreamTlsContext.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/auth/DownstreamTlsContext.ts deleted file mode 100644 index ef9a6f9a4..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/auth/DownstreamTlsContext.ts +++ /dev/null @@ -1,101 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/auth/tls.proto - -import type { CommonTlsContext as _envoy_api_v2_auth_CommonTlsContext, CommonTlsContext__Output as _envoy_api_v2_auth_CommonTlsContext__Output } from '../../../../envoy/api/v2/auth/CommonTlsContext'; -import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../../google/protobuf/BoolValue'; -import type { TlsSessionTicketKeys as _envoy_api_v2_auth_TlsSessionTicketKeys, TlsSessionTicketKeys__Output as _envoy_api_v2_auth_TlsSessionTicketKeys__Output } from '../../../../envoy/api/v2/auth/TlsSessionTicketKeys'; -import type { SdsSecretConfig as _envoy_api_v2_auth_SdsSecretConfig, SdsSecretConfig__Output as _envoy_api_v2_auth_SdsSecretConfig__Output } from '../../../../envoy/api/v2/auth/SdsSecretConfig'; -import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../../google/protobuf/Duration'; - -/** - * [#next-free-field: 8] - */ -export interface DownstreamTlsContext { - /** - * Common TLS context settings. - */ - 'common_tls_context'?: (_envoy_api_v2_auth_CommonTlsContext); - /** - * If specified, Envoy will reject connections without a valid client - * certificate. - */ - 'require_client_certificate'?: (_google_protobuf_BoolValue); - /** - * If specified, Envoy will reject connections without a valid and matching SNI. - * [#not-implemented-hide:] - */ - 'require_sni'?: (_google_protobuf_BoolValue); - /** - * TLS session ticket key settings. - */ - 'session_ticket_keys'?: (_envoy_api_v2_auth_TlsSessionTicketKeys); - /** - * Config for fetching TLS session ticket keys via SDS API. - */ - 'session_ticket_keys_sds_secret_config'?: (_envoy_api_v2_auth_SdsSecretConfig); - /** - * If specified, session_timeout will change maximum lifetime (in seconds) of TLS session - * Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) - * ` - * only seconds could be specified (fractional seconds are going to be ignored). - */ - 'session_timeout'?: (_google_protobuf_Duration); - /** - * Config for controlling stateless TLS session resumption: setting this to true will cause the TLS - * server to not issue TLS session tickets for the purposes of stateless TLS session resumption. - * If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using - * the keys specified through either :ref:`session_ticket_keys ` - * or :ref:`session_ticket_keys_sds_secret_config `. - * If this config is set to false and no keys are explicitly configured, the TLS server will issue - * TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the - * implication that sessions cannot be resumed across hot restarts or on different hosts. 
- */ - 'disable_stateless_session_resumption'?: (boolean); - 'session_ticket_keys_type'?: "session_ticket_keys"|"session_ticket_keys_sds_secret_config"|"disable_stateless_session_resumption"; -} - -/** - * [#next-free-field: 8] - */ -export interface DownstreamTlsContext__Output { - /** - * Common TLS context settings. - */ - 'common_tls_context'?: (_envoy_api_v2_auth_CommonTlsContext__Output); - /** - * If specified, Envoy will reject connections without a valid client - * certificate. - */ - 'require_client_certificate'?: (_google_protobuf_BoolValue__Output); - /** - * If specified, Envoy will reject connections without a valid and matching SNI. - * [#not-implemented-hide:] - */ - 'require_sni'?: (_google_protobuf_BoolValue__Output); - /** - * TLS session ticket key settings. - */ - 'session_ticket_keys'?: (_envoy_api_v2_auth_TlsSessionTicketKeys__Output); - /** - * Config for fetching TLS session ticket keys via SDS API. - */ - 'session_ticket_keys_sds_secret_config'?: (_envoy_api_v2_auth_SdsSecretConfig__Output); - /** - * If specified, session_timeout will change maximum lifetime (in seconds) of TLS session - * Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) - * ` - * only seconds could be specified (fractional seconds are going to be ignored). - */ - 'session_timeout'?: (_google_protobuf_Duration__Output); - /** - * Config for controlling stateless TLS session resumption: setting this to true will cause the TLS - * server to not issue TLS session tickets for the purposes of stateless TLS session resumption. - * If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using - * the keys specified through either :ref:`session_ticket_keys ` - * or :ref:`session_ticket_keys_sds_secret_config `. - * If this config is set to false and no keys are explicitly configured, the TLS server will issue - * TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the - * implication that sessions cannot be resumed across hot restarts or on different hosts. - */ - 'disable_stateless_session_resumption'?: (boolean); - 'session_ticket_keys_type': "session_ticket_keys"|"session_ticket_keys_sds_secret_config"|"disable_stateless_session_resumption"; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/auth/GenericSecret.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/auth/GenericSecret.ts deleted file mode 100644 index d7b712525..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/auth/GenericSecret.ts +++ /dev/null @@ -1,17 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/auth/secret.proto - -import type { DataSource as _envoy_api_v2_core_DataSource, DataSource__Output as _envoy_api_v2_core_DataSource__Output } from '../../../../envoy/api/v2/core/DataSource'; - -export interface GenericSecret { - /** - * Secret of generic type and is available to filters. - */ - 'secret'?: (_envoy_api_v2_core_DataSource); -} - -export interface GenericSecret__Output { - /** - * Secret of generic type and is available to filters. 
- */ - 'secret'?: (_envoy_api_v2_core_DataSource__Output); -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/auth/PrivateKeyProvider.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/auth/PrivateKeyProvider.ts deleted file mode 100644 index 415448053..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/auth/PrivateKeyProvider.ts +++ /dev/null @@ -1,42 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/auth/common.proto - -import type { Struct as _google_protobuf_Struct, Struct__Output as _google_protobuf_Struct__Output } from '../../../../google/protobuf/Struct'; -import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; - -/** - * BoringSSL private key method configuration. The private key methods are used for external - * (potentially asynchronous) signing and decryption operations. Some use cases for private key - * methods would be TPM support and TLS acceleration. - */ -export interface PrivateKeyProvider { - /** - * Private key method provider name. The name must match a - * supported private key method provider type. - */ - 'provider_name'?: (string); - 'config'?: (_google_protobuf_Struct); - 'typed_config'?: (_google_protobuf_Any); - /** - * Private key method provider specific configuration. - */ - 'config_type'?: "config"|"typed_config"; -} - -/** - * BoringSSL private key method configuration. The private key methods are used for external - * (potentially asynchronous) signing and decryption operations. Some use cases for private key - * methods would be TPM support and TLS acceleration. - */ -export interface PrivateKeyProvider__Output { - /** - * Private key method provider name. The name must match a - * supported private key method provider type. - */ - 'provider_name': (string); - 'config'?: (_google_protobuf_Struct__Output); - 'typed_config'?: (_google_protobuf_Any__Output); - /** - * Private key method provider specific configuration. - */ - 'config_type': "config"|"typed_config"; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/auth/SdsSecretConfig.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/auth/SdsSecretConfig.ts deleted file mode 100644 index 888059332..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/auth/SdsSecretConfig.ts +++ /dev/null @@ -1,23 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/auth/secret.proto - -import type { ConfigSource as _envoy_api_v2_core_ConfigSource, ConfigSource__Output as _envoy_api_v2_core_ConfigSource__Output } from '../../../../envoy/api/v2/core/ConfigSource'; - -export interface SdsSecretConfig { - /** - * Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. - * When both name and config are specified, then secret can be fetched and/or reloaded via - * SDS. When only name is specified, then secret will be loaded from static resources. - */ - 'name'?: (string); - 'sds_config'?: (_envoy_api_v2_core_ConfigSource); -} - -export interface SdsSecretConfig__Output { - /** - * Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. - * When both name and config are specified, then secret can be fetched and/or reloaded via - * SDS. When only name is specified, then secret will be loaded from static resources. 
- */ - 'name': (string); - 'sds_config'?: (_envoy_api_v2_core_ConfigSource__Output); -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/auth/Secret.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/auth/Secret.ts deleted file mode 100644 index 0768daed8..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/auth/Secret.ts +++ /dev/null @@ -1,36 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/auth/secret.proto - -import type { TlsCertificate as _envoy_api_v2_auth_TlsCertificate, TlsCertificate__Output as _envoy_api_v2_auth_TlsCertificate__Output } from '../../../../envoy/api/v2/auth/TlsCertificate'; -import type { TlsSessionTicketKeys as _envoy_api_v2_auth_TlsSessionTicketKeys, TlsSessionTicketKeys__Output as _envoy_api_v2_auth_TlsSessionTicketKeys__Output } from '../../../../envoy/api/v2/auth/TlsSessionTicketKeys'; -import type { CertificateValidationContext as _envoy_api_v2_auth_CertificateValidationContext, CertificateValidationContext__Output as _envoy_api_v2_auth_CertificateValidationContext__Output } from '../../../../envoy/api/v2/auth/CertificateValidationContext'; -import type { GenericSecret as _envoy_api_v2_auth_GenericSecret, GenericSecret__Output as _envoy_api_v2_auth_GenericSecret__Output } from '../../../../envoy/api/v2/auth/GenericSecret'; - -/** - * [#next-free-field: 6] - */ -export interface Secret { - /** - * Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. - */ - 'name'?: (string); - 'tls_certificate'?: (_envoy_api_v2_auth_TlsCertificate); - 'session_ticket_keys'?: (_envoy_api_v2_auth_TlsSessionTicketKeys); - 'validation_context'?: (_envoy_api_v2_auth_CertificateValidationContext); - 'generic_secret'?: (_envoy_api_v2_auth_GenericSecret); - 'type'?: "tls_certificate"|"session_ticket_keys"|"validation_context"|"generic_secret"; -} - -/** - * [#next-free-field: 6] - */ -export interface Secret__Output { - /** - * Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. - */ - 'name': (string); - 'tls_certificate'?: (_envoy_api_v2_auth_TlsCertificate__Output); - 'session_ticket_keys'?: (_envoy_api_v2_auth_TlsSessionTicketKeys__Output); - 'validation_context'?: (_envoy_api_v2_auth_CertificateValidationContext__Output); - 'generic_secret'?: (_envoy_api_v2_auth_GenericSecret__Output); - 'type': "tls_certificate"|"session_ticket_keys"|"validation_context"|"generic_secret"; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/auth/TlsCertificate.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/auth/TlsCertificate.ts deleted file mode 100644 index dd7efcf21..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/auth/TlsCertificate.ts +++ /dev/null @@ -1,78 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/auth/common.proto - -import type { DataSource as _envoy_api_v2_core_DataSource, DataSource__Output as _envoy_api_v2_core_DataSource__Output } from '../../../../envoy/api/v2/core/DataSource'; -import type { PrivateKeyProvider as _envoy_api_v2_auth_PrivateKeyProvider, PrivateKeyProvider__Output as _envoy_api_v2_auth_PrivateKeyProvider__Output } from '../../../../envoy/api/v2/auth/PrivateKeyProvider'; - -/** - * [#next-free-field: 7] - */ -export interface TlsCertificate { - /** - * The TLS certificate chain. - */ - 'certificate_chain'?: (_envoy_api_v2_core_DataSource); - /** - * The TLS private key. - */ - 'private_key'?: (_envoy_api_v2_core_DataSource); - /** - * The password to decrypt the TLS private key. 
If this field is not set, it is assumed that the - * TLS private key is not password encrypted. - */ - 'password'?: (_envoy_api_v2_core_DataSource); - /** - * [#not-implemented-hide:] - */ - 'ocsp_staple'?: (_envoy_api_v2_core_DataSource); - /** - * [#not-implemented-hide:] - */ - 'signed_certificate_timestamp'?: (_envoy_api_v2_core_DataSource)[]; - /** - * BoringSSL private key method provider. This is an alternative to :ref:`private_key - * ` field. This can't be - * marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key - * ` and - * :ref:`private_key_provider - * ` fields will result in an - * error. - */ - 'private_key_provider'?: (_envoy_api_v2_auth_PrivateKeyProvider); -} - -/** - * [#next-free-field: 7] - */ -export interface TlsCertificate__Output { - /** - * The TLS certificate chain. - */ - 'certificate_chain'?: (_envoy_api_v2_core_DataSource__Output); - /** - * The TLS private key. - */ - 'private_key'?: (_envoy_api_v2_core_DataSource__Output); - /** - * The password to decrypt the TLS private key. If this field is not set, it is assumed that the - * TLS private key is not password encrypted. - */ - 'password'?: (_envoy_api_v2_core_DataSource__Output); - /** - * [#not-implemented-hide:] - */ - 'ocsp_staple'?: (_envoy_api_v2_core_DataSource__Output); - /** - * [#not-implemented-hide:] - */ - 'signed_certificate_timestamp': (_envoy_api_v2_core_DataSource__Output)[]; - /** - * BoringSSL private key method provider. This is an alternative to :ref:`private_key - * ` field. This can't be - * marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key - * ` and - * :ref:`private_key_provider - * ` fields will result in an - * error. - */ - 'private_key_provider'?: (_envoy_api_v2_auth_PrivateKeyProvider__Output); -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/auth/TlsParameters.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/auth/TlsParameters.ts deleted file mode 100644 index 29fe8f4c8..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/auth/TlsParameters.ts +++ /dev/null @@ -1,171 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/auth/common.proto - - -// Original file: deps/envoy-api/envoy/api/v2/auth/common.proto - -export enum _envoy_api_v2_auth_TlsParameters_TlsProtocol { - /** - * Envoy will choose the optimal TLS version. - */ - TLS_AUTO = 0, - /** - * TLS 1.0 - */ - TLSv1_0 = 1, - /** - * TLS 1.1 - */ - TLSv1_1 = 2, - /** - * TLS 1.2 - */ - TLSv1_2 = 3, - /** - * TLS 1.3 - */ - TLSv1_3 = 4, -} - -export interface TlsParameters { - /** - * Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for - * servers. - */ - 'tls_minimum_protocol_version'?: (_envoy_api_v2_auth_TlsParameters_TlsProtocol | keyof typeof _envoy_api_v2_auth_TlsParameters_TlsProtocol); - /** - * Maximum TLS protocol version. By default, it's ``TLSv1_3`` for servers in non-FIPS builds, and - * ``TLSv1_2`` for clients and for servers using :ref:`BoringSSL FIPS `. - */ - 'tls_maximum_protocol_version'?: (_envoy_api_v2_auth_TlsParameters_TlsProtocol | keyof typeof _envoy_api_v2_auth_TlsParameters_TlsProtocol); - /** - * If specified, the TLS listener will only support the specified `cipher list - * `_ - * when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). If not - * specified, the default list will be used. - * - * In non-FIPS builds, the default cipher list is: - * - * .. 
code-block:: none - * - * [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] - * [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] - * ECDHE-ECDSA-AES128-SHA - * ECDHE-RSA-AES128-SHA - * AES128-GCM-SHA256 - * AES128-SHA - * ECDHE-ECDSA-AES256-GCM-SHA384 - * ECDHE-RSA-AES256-GCM-SHA384 - * ECDHE-ECDSA-AES256-SHA - * ECDHE-RSA-AES256-SHA - * AES256-GCM-SHA384 - * AES256-SHA - * - * In builds using :ref:`BoringSSL FIPS `, the default cipher list is: - * - * .. code-block:: none - * - * ECDHE-ECDSA-AES128-GCM-SHA256 - * ECDHE-RSA-AES128-GCM-SHA256 - * ECDHE-ECDSA-AES128-SHA - * ECDHE-RSA-AES128-SHA - * AES128-GCM-SHA256 - * AES128-SHA - * ECDHE-ECDSA-AES256-GCM-SHA384 - * ECDHE-RSA-AES256-GCM-SHA384 - * ECDHE-ECDSA-AES256-SHA - * ECDHE-RSA-AES256-SHA - * AES256-GCM-SHA384 - * AES256-SHA - */ - 'cipher_suites'?: (string)[]; - /** - * If specified, the TLS connection will only support the specified ECDH - * curves. If not specified, the default curves will be used. - * - * In non-FIPS builds, the default curves are: - * - * .. code-block:: none - * - * X25519 - * P-256 - * - * In builds using :ref:`BoringSSL FIPS `, the default curve is: - * - * .. code-block:: none - * - * P-256 - */ - 'ecdh_curves'?: (string)[]; -} - -export interface TlsParameters__Output { - /** - * Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for - * servers. - */ - 'tls_minimum_protocol_version': (keyof typeof _envoy_api_v2_auth_TlsParameters_TlsProtocol); - /** - * Maximum TLS protocol version. By default, it's ``TLSv1_3`` for servers in non-FIPS builds, and - * ``TLSv1_2`` for clients and for servers using :ref:`BoringSSL FIPS `. - */ - 'tls_maximum_protocol_version': (keyof typeof _envoy_api_v2_auth_TlsParameters_TlsProtocol); - /** - * If specified, the TLS listener will only support the specified `cipher list - * `_ - * when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). If not - * specified, the default list will be used. - * - * In non-FIPS builds, the default cipher list is: - * - * .. code-block:: none - * - * [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] - * [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] - * ECDHE-ECDSA-AES128-SHA - * ECDHE-RSA-AES128-SHA - * AES128-GCM-SHA256 - * AES128-SHA - * ECDHE-ECDSA-AES256-GCM-SHA384 - * ECDHE-RSA-AES256-GCM-SHA384 - * ECDHE-ECDSA-AES256-SHA - * ECDHE-RSA-AES256-SHA - * AES256-GCM-SHA384 - * AES256-SHA - * - * In builds using :ref:`BoringSSL FIPS `, the default cipher list is: - * - * .. code-block:: none - * - * ECDHE-ECDSA-AES128-GCM-SHA256 - * ECDHE-RSA-AES128-GCM-SHA256 - * ECDHE-ECDSA-AES128-SHA - * ECDHE-RSA-AES128-SHA - * AES128-GCM-SHA256 - * AES128-SHA - * ECDHE-ECDSA-AES256-GCM-SHA384 - * ECDHE-RSA-AES256-GCM-SHA384 - * ECDHE-ECDSA-AES256-SHA - * ECDHE-RSA-AES256-SHA - * AES256-GCM-SHA384 - * AES256-SHA - */ - 'cipher_suites': (string)[]; - /** - * If specified, the TLS connection will only support the specified ECDH - * curves. If not specified, the default curves will be used. - * - * In non-FIPS builds, the default curves are: - * - * .. code-block:: none - * - * X25519 - * P-256 - * - * In builds using :ref:`BoringSSL FIPS `, the default curve is: - * - * .. 
code-block:: none - * - * P-256 - */ - 'ecdh_curves': (string)[]; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/auth/TlsSessionTicketKeys.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/auth/TlsSessionTicketKeys.ts deleted file mode 100644 index d4bedd682..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/auth/TlsSessionTicketKeys.ts +++ /dev/null @@ -1,61 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/auth/common.proto - -import type { DataSource as _envoy_api_v2_core_DataSource, DataSource__Output as _envoy_api_v2_core_DataSource__Output } from '../../../../envoy/api/v2/core/DataSource'; - -export interface TlsSessionTicketKeys { - /** - * Keys for encrypting and decrypting TLS session tickets. The - * first key in the array contains the key to encrypt all new sessions created by this context. - * All keys are candidates for decrypting received tickets. This allows for easy rotation of keys - * by, for example, putting the new key first, and the previous key second. - * - * If :ref:`session_ticket_keys ` - * is not specified, the TLS library will still support resuming sessions via tickets, but it will - * use an internally-generated and managed key, so sessions cannot be resumed across hot restarts - * or on different hosts. - * - * Each key must contain exactly 80 bytes of cryptographically-secure random data. For - * example, the output of ``openssl rand 80``. - * - * .. attention:: - * - * Using this feature has serious security considerations and risks. Improper handling of keys - * may result in loss of secrecy in connections, even if ciphers supporting perfect forward - * secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some - * discussion. To minimize the risk, you must: - * - * * Keep the session ticket keys at least as secure as your TLS certificate private keys - * * Rotate session ticket keys at least daily, and preferably hourly - * * Always generate keys using a cryptographically-secure random data source - */ - 'keys'?: (_envoy_api_v2_core_DataSource)[]; -} - -export interface TlsSessionTicketKeys__Output { - /** - * Keys for encrypting and decrypting TLS session tickets. The - * first key in the array contains the key to encrypt all new sessions created by this context. - * All keys are candidates for decrypting received tickets. This allows for easy rotation of keys - * by, for example, putting the new key first, and the previous key second. - * - * If :ref:`session_ticket_keys ` - * is not specified, the TLS library will still support resuming sessions via tickets, but it will - * use an internally-generated and managed key, so sessions cannot be resumed across hot restarts - * or on different hosts. - * - * Each key must contain exactly 80 bytes of cryptographically-secure random data. For - * example, the output of ``openssl rand 80``. - * - * .. attention:: - * - * Using this feature has serious security considerations and risks. Improper handling of keys - * may result in loss of secrecy in connections, even if ciphers supporting perfect forward - * secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some - * discussion. 
To minimize the risk, you must: - * - * * Keep the session ticket keys at least as secure as your TLS certificate private keys - * * Rotate session ticket keys at least daily, and preferably hourly - * * Always generate keys using a cryptographically-secure random data source - */ - 'keys': (_envoy_api_v2_core_DataSource__Output)[]; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/auth/UpstreamTlsContext.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/auth/UpstreamTlsContext.ts deleted file mode 100644 index b9dc4414f..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/auth/UpstreamTlsContext.ts +++ /dev/null @@ -1,68 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/auth/tls.proto - -import type { CommonTlsContext as _envoy_api_v2_auth_CommonTlsContext, CommonTlsContext__Output as _envoy_api_v2_auth_CommonTlsContext__Output } from '../../../../envoy/api/v2/auth/CommonTlsContext'; -import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; - -export interface UpstreamTlsContext { - /** - * Common TLS context settings. - * - * .. attention:: - * - * Server certificate verification is not enabled by default. Configure - * :ref:`trusted_ca` to enable - * verification. - */ - 'common_tls_context'?: (_envoy_api_v2_auth_CommonTlsContext); - /** - * SNI string to use when creating TLS backend connections. - */ - 'sni'?: (string); - /** - * If true, server-initiated TLS renegotiation will be allowed. - * - * .. attention:: - * - * TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. - */ - 'allow_renegotiation'?: (boolean); - /** - * Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets - * for TLSv1.2 and older) to store for the purpose of session resumption. - * - * Defaults to 1, setting this to 0 disables session resumption. - */ - 'max_session_keys'?: (_google_protobuf_UInt32Value); -} - -export interface UpstreamTlsContext__Output { - /** - * Common TLS context settings. - * - * .. attention:: - * - * Server certificate verification is not enabled by default. Configure - * :ref:`trusted_ca` to enable - * verification. - */ - 'common_tls_context'?: (_envoy_api_v2_auth_CommonTlsContext__Output); - /** - * SNI string to use when creating TLS backend connections. - */ - 'sni': (string); - /** - * If true, server-initiated TLS renegotiation will be allowed. - * - * .. attention:: - * - * TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. - */ - 'allow_renegotiation': (boolean); - /** - * Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets - * for TLSv1.2 and older) to store for the purpose of session resumption. - * - * Defaults to 1, setting this to 0 disables session resumption. 
- */ - 'max_session_keys'?: (_google_protobuf_UInt32Value__Output); -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/Address.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/core/Address.ts deleted file mode 100644 index b12c8d9b0..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/Address.ts +++ /dev/null @@ -1,26 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/address.proto - -import type { SocketAddress as _envoy_api_v2_core_SocketAddress, SocketAddress__Output as _envoy_api_v2_core_SocketAddress__Output } from '../../../../envoy/api/v2/core/SocketAddress'; -import type { Pipe as _envoy_api_v2_core_Pipe, Pipe__Output as _envoy_api_v2_core_Pipe__Output } from '../../../../envoy/api/v2/core/Pipe'; - -/** - * Addresses specify either a logical or physical address and port, which are - * used to tell Envoy where to bind/listen, connect to upstream and find - * management servers. - */ -export interface Address { - 'socket_address'?: (_envoy_api_v2_core_SocketAddress); - 'pipe'?: (_envoy_api_v2_core_Pipe); - 'address'?: "socket_address"|"pipe"; -} - -/** - * Addresses specify either a logical or physical address and port, which are - * used to tell Envoy where to bind/listen, connect to upstream and find - * management servers. - */ -export interface Address__Output { - 'socket_address'?: (_envoy_api_v2_core_SocketAddress__Output); - 'pipe'?: (_envoy_api_v2_core_Pipe__Output); - 'address': "socket_address"|"pipe"; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/ApiConfigSource.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/core/ApiConfigSource.ts deleted file mode 100644 index a99924d9b..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/ApiConfigSource.ts +++ /dev/null @@ -1,135 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/config_source.proto - -import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../../google/protobuf/Duration'; -import type { GrpcService as _envoy_api_v2_core_GrpcService, GrpcService__Output as _envoy_api_v2_core_GrpcService__Output } from '../../../../envoy/api/v2/core/GrpcService'; -import type { RateLimitSettings as _envoy_api_v2_core_RateLimitSettings, RateLimitSettings__Output as _envoy_api_v2_core_RateLimitSettings__Output } from '../../../../envoy/api/v2/core/RateLimitSettings'; -import type { ApiVersion as _envoy_api_v2_core_ApiVersion } from '../../../../envoy/api/v2/core/ApiVersion'; - -// Original file: deps/envoy-api/envoy/api/v2/core/config_source.proto - -/** - * APIs may be fetched via either REST or gRPC. - */ -export enum _envoy_api_v2_core_ApiConfigSource_ApiType { - /** - * Ideally this would be 'reserved 0' but one can't reserve the default - * value. Instead we throw an exception if this is ever used. - */ - UNSUPPORTED_REST_LEGACY = 0, - /** - * REST-JSON v2 API. The `canonical JSON encoding - * `_ for - * the v2 protos is used. - */ - REST = 1, - /** - * gRPC v2 API. - */ - GRPC = 2, - /** - * Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} - * rather than Discovery{Request,Response}. Rather than sending Envoy the entire state - * with every update, the xDS server only sends what has changed since the last update. - */ - DELTA_GRPC = 3, -} - -/** - * API configuration source. This identifies the API type and cluster that Envoy - * will use to fetch an xDS API. 
- * [#next-free-field: 9] - */ -export interface ApiConfigSource { - /** - * API type (gRPC, REST, delta gRPC) - */ - 'api_type'?: (_envoy_api_v2_core_ApiConfigSource_ApiType | keyof typeof _envoy_api_v2_core_ApiConfigSource_ApiType); - /** - * Cluster names should be used only with REST. If > 1 - * cluster is defined, clusters will be cycled through if any kind of failure - * occurs. - * - * .. note:: - * - * The cluster with name ``cluster_name`` must be statically defined and its - * type must not be ``EDS``. - */ - 'cluster_names'?: (string)[]; - /** - * For REST APIs, the delay between successive polls. - */ - 'refresh_delay'?: (_google_protobuf_Duration); - /** - * Multiple gRPC services be provided for GRPC. If > 1 cluster is defined, - * services will be cycled through if any kind of failure occurs. - */ - 'grpc_services'?: (_envoy_api_v2_core_GrpcService)[]; - /** - * For REST APIs, the request timeout. If not set, a default value of 1s will be used. - */ - 'request_timeout'?: (_google_protobuf_Duration); - /** - * For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be - * rate limited. - */ - 'rate_limit_settings'?: (_envoy_api_v2_core_RateLimitSettings); - /** - * Skip the node identifier in subsequent discovery requests for streaming gRPC config types. - */ - 'set_node_on_first_message_only'?: (boolean); - /** - * API version for xDS transport protocol. This describes the xDS gRPC/REST - * endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. - */ - 'transport_api_version'?: (_envoy_api_v2_core_ApiVersion | keyof typeof _envoy_api_v2_core_ApiVersion); -} - -/** - * API configuration source. This identifies the API type and cluster that Envoy - * will use to fetch an xDS API. - * [#next-free-field: 9] - */ -export interface ApiConfigSource__Output { - /** - * API type (gRPC, REST, delta gRPC) - */ - 'api_type': (keyof typeof _envoy_api_v2_core_ApiConfigSource_ApiType); - /** - * Cluster names should be used only with REST. If > 1 - * cluster is defined, clusters will be cycled through if any kind of failure - * occurs. - * - * .. note:: - * - * The cluster with name ``cluster_name`` must be statically defined and its - * type must not be ``EDS``. - */ - 'cluster_names': (string)[]; - /** - * For REST APIs, the delay between successive polls. - */ - 'refresh_delay'?: (_google_protobuf_Duration__Output); - /** - * Multiple gRPC services be provided for GRPC. If > 1 cluster is defined, - * services will be cycled through if any kind of failure occurs. - */ - 'grpc_services': (_envoy_api_v2_core_GrpcService__Output)[]; - /** - * For REST APIs, the request timeout. If not set, a default value of 1s will be used. - */ - 'request_timeout'?: (_google_protobuf_Duration__Output); - /** - * For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be - * rate limited. - */ - 'rate_limit_settings'?: (_envoy_api_v2_core_RateLimitSettings__Output); - /** - * Skip the node identifier in subsequent discovery requests for streaming gRPC config types. - */ - 'set_node_on_first_message_only': (boolean); - /** - * API version for xDS transport protocol. This describes the xDS gRPC/REST - * endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. 
- */ - 'transport_api_version': (keyof typeof _envoy_api_v2_core_ApiVersion); -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/ApiVersion.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/core/ApiVersion.ts deleted file mode 100644 index 7f03a5995..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/ApiVersion.ts +++ /dev/null @@ -1,22 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/config_source.proto - -/** - * xDS API version. This is used to describe both resource and transport - * protocol versions (in distinct configuration fields). - */ -export enum ApiVersion { - /** - * When not specified, we assume v2, to ease migration to Envoy's stable API - * versioning. If a client does not support v2 (e.g. due to deprecation), this - * is an invalid value. - */ - AUTO = 0, - /** - * Use xDS v2 API. - */ - V2 = 1, - /** - * Use xDS v3 API. - */ - V3 = 2, -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/AsyncDataSource.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/core/AsyncDataSource.ts deleted file mode 100644 index fb312bb40..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/AsyncDataSource.ts +++ /dev/null @@ -1,34 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/base.proto - -import type { DataSource as _envoy_api_v2_core_DataSource, DataSource__Output as _envoy_api_v2_core_DataSource__Output } from '../../../../envoy/api/v2/core/DataSource'; -import type { RemoteDataSource as _envoy_api_v2_core_RemoteDataSource, RemoteDataSource__Output as _envoy_api_v2_core_RemoteDataSource__Output } from '../../../../envoy/api/v2/core/RemoteDataSource'; - -/** - * Async data source which support async data fetch. - */ -export interface AsyncDataSource { - /** - * Local async data source. - */ - 'local'?: (_envoy_api_v2_core_DataSource); - /** - * Remote async data source. - */ - 'remote'?: (_envoy_api_v2_core_RemoteDataSource); - 'specifier'?: "local"|"remote"; -} - -/** - * Async data source which support async data fetch. - */ -export interface AsyncDataSource__Output { - /** - * Local async data source. - */ - 'local'?: (_envoy_api_v2_core_DataSource__Output); - /** - * Remote async data source. - */ - 'remote'?: (_envoy_api_v2_core_RemoteDataSource__Output); - 'specifier': "local"|"remote"; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/BindConfig.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/core/BindConfig.ts deleted file mode 100644 index f989494ea..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/BindConfig.ts +++ /dev/null @@ -1,49 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/address.proto - -import type { SocketAddress as _envoy_api_v2_core_SocketAddress, SocketAddress__Output as _envoy_api_v2_core_SocketAddress__Output } from '../../../../envoy/api/v2/core/SocketAddress'; -import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../../google/protobuf/BoolValue'; -import type { SocketOption as _envoy_api_v2_core_SocketOption, SocketOption__Output as _envoy_api_v2_core_SocketOption__Output } from '../../../../envoy/api/v2/core/SocketOption'; - -export interface BindConfig { - /** - * The address to bind to when creating a socket. - */ - 'source_address'?: (_envoy_api_v2_core_SocketAddress); - /** - * Whether to set the *IP_FREEBIND* option when creating the socket. 
When this - * flag is set to true, allows the :ref:`source_address - * ` to be an IP address - * that is not configured on the system running Envoy. When this flag is set - * to false, the option *IP_FREEBIND* is disabled on the socket. When this - * flag is not set (default), the socket is not modified, i.e. the option is - * neither enabled nor disabled. - */ - 'freebind'?: (_google_protobuf_BoolValue); - /** - * Additional socket options that may not be present in Envoy source code or - * precompiled binaries. - */ - 'socket_options'?: (_envoy_api_v2_core_SocketOption)[]; -} - -export interface BindConfig__Output { - /** - * The address to bind to when creating a socket. - */ - 'source_address'?: (_envoy_api_v2_core_SocketAddress__Output); - /** - * Whether to set the *IP_FREEBIND* option when creating the socket. When this - * flag is set to true, allows the :ref:`source_address - * ` to be an IP address - * that is not configured on the system running Envoy. When this flag is set - * to false, the option *IP_FREEBIND* is disabled on the socket. When this - * flag is not set (default), the socket is not modified, i.e. the option is - * neither enabled nor disabled. - */ - 'freebind'?: (_google_protobuf_BoolValue__Output); - /** - * Additional socket options that may not be present in Envoy source code or - * precompiled binaries. - */ - 'socket_options': (_envoy_api_v2_core_SocketOption__Output)[]; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/GrpcProtocolOptions.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/core/GrpcProtocolOptions.ts deleted file mode 100644 index 66723801e..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/GrpcProtocolOptions.ts +++ /dev/null @@ -1,17 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/protocol.proto - -import type { Http2ProtocolOptions as _envoy_api_v2_core_Http2ProtocolOptions, Http2ProtocolOptions__Output as _envoy_api_v2_core_Http2ProtocolOptions__Output } from '../../../../envoy/api/v2/core/Http2ProtocolOptions'; - -/** - * [#not-implemented-hide:] - */ -export interface GrpcProtocolOptions { - 'http2_protocol_options'?: (_envoy_api_v2_core_Http2ProtocolOptions); -} - -/** - * [#not-implemented-hide:] - */ -export interface GrpcProtocolOptions__Output { - 'http2_protocol_options'?: (_envoy_api_v2_core_Http2ProtocolOptions__Output); -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/HeaderMap.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/core/HeaderMap.ts deleted file mode 100644 index e093d4761..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/HeaderMap.ts +++ /dev/null @@ -1,17 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/base.proto - -import type { HeaderValue as _envoy_api_v2_core_HeaderValue, HeaderValue__Output as _envoy_api_v2_core_HeaderValue__Output } from '../../../../envoy/api/v2/core/HeaderValue'; - -/** - * Wrapper for a set of headers. - */ -export interface HeaderMap { - 'headers'?: (_envoy_api_v2_core_HeaderValue)[]; -} - -/** - * Wrapper for a set of headers. 
- */ -export interface HeaderMap__Output { - 'headers': (_envoy_api_v2_core_HeaderValue__Output)[]; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/HeaderValueOption.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/core/HeaderValueOption.ts deleted file mode 100644 index b90bd85e1..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/HeaderValueOption.ts +++ /dev/null @@ -1,34 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/base.proto - -import type { HeaderValue as _envoy_api_v2_core_HeaderValue, HeaderValue__Output as _envoy_api_v2_core_HeaderValue__Output } from '../../../../envoy/api/v2/core/HeaderValue'; -import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../../google/protobuf/BoolValue'; - -/** - * Header name/value pair plus option to control append behavior. - */ -export interface HeaderValueOption { - /** - * Header name/value pair that this option applies to. - */ - 'header'?: (_envoy_api_v2_core_HeaderValue); - /** - * Should the value be appended? If true (default), the value is appended to - * existing values. - */ - 'append'?: (_google_protobuf_BoolValue); -} - -/** - * Header name/value pair plus option to control append behavior. - */ -export interface HeaderValueOption__Output { - /** - * Header name/value pair that this option applies to. - */ - 'header'?: (_envoy_api_v2_core_HeaderValue__Output); - /** - * Should the value be appended? If true (default), the value is appended to - * existing values. - */ - 'append'?: (_google_protobuf_BoolValue__Output); -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/HealthCheck.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/core/HealthCheck.ts deleted file mode 100644 index 0b45042f3..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/HealthCheck.ts +++ /dev/null @@ -1,607 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/health_check.proto - -import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../../google/protobuf/Duration'; -import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; -import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../../google/protobuf/BoolValue'; -import type { EventServiceConfig as _envoy_api_v2_core_EventServiceConfig, EventServiceConfig__Output as _envoy_api_v2_core_EventServiceConfig__Output } from '../../../../envoy/api/v2/core/EventServiceConfig'; -import type { HeaderValueOption as _envoy_api_v2_core_HeaderValueOption, HeaderValueOption__Output as _envoy_api_v2_core_HeaderValueOption__Output } from '../../../../envoy/api/v2/core/HeaderValueOption'; -import type { Int64Range as _envoy_type_Int64Range, Int64Range__Output as _envoy_type_Int64Range__Output } from '../../../../envoy/type/Int64Range'; -import type { CodecClientType as _envoy_type_CodecClientType } from '../../../../envoy/type/CodecClientType'; -import type { StringMatcher as _envoy_type_matcher_StringMatcher, StringMatcher__Output as _envoy_type_matcher_StringMatcher__Output } from '../../../../envoy/type/matcher/StringMatcher'; -import type { Struct as _google_protobuf_Struct, Struct__Output as _google_protobuf_Struct__Output } from '../../../../google/protobuf/Struct'; -import type { Any as _google_protobuf_Any, 
Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; -import type { Long } from '@grpc/proto-loader'; - -/** - * Custom health check. - */ -export interface _envoy_api_v2_core_HealthCheck_CustomHealthCheck { - /** - * The registered name of the custom health checker. - */ - 'name'?: (string); - 'config'?: (_google_protobuf_Struct); - 'typed_config'?: (_google_protobuf_Any); - /** - * A custom health checker specific configuration which depends on the custom health checker - * being instantiated. See :api:`envoy/config/health_checker` for reference. - */ - 'config_type'?: "config"|"typed_config"; -} - -/** - * Custom health check. - */ -export interface _envoy_api_v2_core_HealthCheck_CustomHealthCheck__Output { - /** - * The registered name of the custom health checker. - */ - 'name': (string); - 'config'?: (_google_protobuf_Struct__Output); - 'typed_config'?: (_google_protobuf_Any__Output); - /** - * A custom health checker specific configuration which depends on the custom health checker - * being instantiated. See :api:`envoy/config/health_checker` for reference. - */ - 'config_type': "config"|"typed_config"; -} - -/** - * `grpc.health.v1.Health - * `_-based - * healthcheck. See `gRPC doc `_ - * for details. - */ -export interface _envoy_api_v2_core_HealthCheck_GrpcHealthCheck { - /** - * An optional service name parameter which will be sent to gRPC service in - * `grpc.health.v1.HealthCheckRequest - * `_. - * message. See `gRPC health-checking overview - * `_ for more information. - */ - 'service_name'?: (string); - /** - * The value of the :authority header in the gRPC health check request. If - * left empty (default value), the name of the cluster this health check is associated - * with will be used. The authority header can be customized for a specific endpoint by setting - * the :ref:`hostname ` field. - */ - 'authority'?: (string); -} - -/** - * `grpc.health.v1.Health - * `_-based - * healthcheck. See `gRPC doc `_ - * for details. - */ -export interface _envoy_api_v2_core_HealthCheck_GrpcHealthCheck__Output { - /** - * An optional service name parameter which will be sent to gRPC service in - * `grpc.health.v1.HealthCheckRequest - * `_. - * message. See `gRPC health-checking overview - * `_ for more information. - */ - 'service_name': (string); - /** - * The value of the :authority header in the gRPC health check request. If - * left empty (default value), the name of the cluster this health check is associated - * with will be used. The authority header can be customized for a specific endpoint by setting - * the :ref:`hostname ` field. - */ - 'authority': (string); -} - -/** - * [#next-free-field: 12] - */ -export interface _envoy_api_v2_core_HealthCheck_HttpHealthCheck { - /** - * The value of the host header in the HTTP health check request. If - * left empty (default value), the name of the cluster this health check is associated - * with will be used. The host header can be customized for a specific endpoint by setting the - * :ref:`hostname ` field. - */ - 'host'?: (string); - /** - * Specifies the HTTP path that will be requested during health checking. For example - * * /healthcheck*. - */ - 'path'?: (string); - /** - * [#not-implemented-hide:] HTTP specific payload. - */ - 'send'?: (_envoy_api_v2_core_HealthCheck_Payload); - /** - * [#not-implemented-hide:] HTTP specific response. 
- */ - 'receive'?: (_envoy_api_v2_core_HealthCheck_Payload); - /** - * An optional service name parameter which is used to validate the identity of - * the health checked cluster. See the :ref:`architecture overview - * ` for more information. - * - * .. attention:: - * - * This field has been deprecated in favor of `service_name_matcher` for better flexibility - * over matching with service-cluster name. - */ - 'service_name'?: (string); - /** - * Specifies a list of HTTP headers that should be added to each request that is sent to the - * health checked cluster. For more information, including details on header value syntax, see - * the documentation on :ref:`custom request headers - * `. - */ - 'request_headers_to_add'?: (_envoy_api_v2_core_HeaderValueOption)[]; - /** - * Specifies a list of HTTP headers that should be removed from each request that is sent to the - * health checked cluster. - */ - 'request_headers_to_remove'?: (string)[]; - /** - * If set, health checks will be made using http/2. - * Deprecated, use :ref:`codec_client_type - * ` instead. - */ - 'use_http2'?: (boolean); - /** - * Specifies a list of HTTP response statuses considered healthy. If provided, replaces default - * 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open - * semantics of :ref:`Int64Range `. The start and end of each - * range are required. Only statuses in the range [100, 600) are allowed. - */ - 'expected_statuses'?: (_envoy_type_Int64Range)[]; - /** - * Use specified application protocol for health checks. - */ - 'codec_client_type'?: (_envoy_type_CodecClientType | keyof typeof _envoy_type_CodecClientType); - /** - * An optional service name parameter which is used to validate the identity of - * the health checked cluster using a :ref:`StringMatcher - * `. See the :ref:`architecture overview - * ` for more information. - */ - 'service_name_matcher'?: (_envoy_type_matcher_StringMatcher); -} - -/** - * [#next-free-field: 12] - */ -export interface _envoy_api_v2_core_HealthCheck_HttpHealthCheck__Output { - /** - * The value of the host header in the HTTP health check request. If - * left empty (default value), the name of the cluster this health check is associated - * with will be used. The host header can be customized for a specific endpoint by setting the - * :ref:`hostname ` field. - */ - 'host': (string); - /** - * Specifies the HTTP path that will be requested during health checking. For example - * * /healthcheck*. - */ - 'path': (string); - /** - * [#not-implemented-hide:] HTTP specific payload. - */ - 'send'?: (_envoy_api_v2_core_HealthCheck_Payload__Output); - /** - * [#not-implemented-hide:] HTTP specific response. - */ - 'receive'?: (_envoy_api_v2_core_HealthCheck_Payload__Output); - /** - * An optional service name parameter which is used to validate the identity of - * the health checked cluster. See the :ref:`architecture overview - * ` for more information. - * - * .. attention:: - * - * This field has been deprecated in favor of `service_name_matcher` for better flexibility - * over matching with service-cluster name. - */ - 'service_name': (string); - /** - * Specifies a list of HTTP headers that should be added to each request that is sent to the - * health checked cluster. For more information, including details on header value syntax, see - * the documentation on :ref:`custom request headers - * `. 
- */ - 'request_headers_to_add': (_envoy_api_v2_core_HeaderValueOption__Output)[]; - /** - * Specifies a list of HTTP headers that should be removed from each request that is sent to the - * health checked cluster. - */ - 'request_headers_to_remove': (string)[]; - /** - * If set, health checks will be made using http/2. - * Deprecated, use :ref:`codec_client_type - * ` instead. - */ - 'use_http2': (boolean); - /** - * Specifies a list of HTTP response statuses considered healthy. If provided, replaces default - * 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open - * semantics of :ref:`Int64Range `. The start and end of each - * range are required. Only statuses in the range [100, 600) are allowed. - */ - 'expected_statuses': (_envoy_type_Int64Range__Output)[]; - /** - * Use specified application protocol for health checks. - */ - 'codec_client_type': (keyof typeof _envoy_type_CodecClientType); - /** - * An optional service name parameter which is used to validate the identity of - * the health checked cluster using a :ref:`StringMatcher - * `. See the :ref:`architecture overview - * ` for more information. - */ - 'service_name_matcher'?: (_envoy_type_matcher_StringMatcher__Output); -} - -/** - * Describes the encoding of the payload bytes in the payload. - */ -export interface _envoy_api_v2_core_HealthCheck_Payload { - /** - * Hex encoded payload. E.g., "000000FF". - */ - 'text'?: (string); - /** - * [#not-implemented-hide:] Binary payload. - */ - 'binary'?: (Buffer | Uint8Array | string); - 'payload'?: "text"|"binary"; -} - -/** - * Describes the encoding of the payload bytes in the payload. - */ -export interface _envoy_api_v2_core_HealthCheck_Payload__Output { - /** - * Hex encoded payload. E.g., "000000FF". - */ - 'text'?: (string); - /** - * [#not-implemented-hide:] Binary payload. - */ - 'binary'?: (Buffer); - 'payload': "text"|"binary"; -} - -export interface _envoy_api_v2_core_HealthCheck_RedisHealthCheck { - /** - * If set, optionally perform ``EXISTS `` instead of ``PING``. A return value - * from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other - * than 0 is considered a failure. This allows the user to mark a Redis instance for maintenance - * by setting the specified key to any value and waiting for traffic to drain. - */ - 'key'?: (string); -} - -export interface _envoy_api_v2_core_HealthCheck_RedisHealthCheck__Output { - /** - * If set, optionally perform ``EXISTS `` instead of ``PING``. A return value - * from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other - * than 0 is considered a failure. This allows the user to mark a Redis instance for maintenance - * by setting the specified key to any value and waiting for traffic to drain. - */ - 'key': (string); -} - -export interface _envoy_api_v2_core_HealthCheck_TcpHealthCheck { - /** - * Empty payloads imply a connect-only health check. - */ - 'send'?: (_envoy_api_v2_core_HealthCheck_Payload); - /** - * When checking the response, “fuzzy” matching is performed such that each - * binary block must be found, and in the order specified, but not - * necessarily contiguous. - */ - 'receive'?: (_envoy_api_v2_core_HealthCheck_Payload)[]; -} - -export interface _envoy_api_v2_core_HealthCheck_TcpHealthCheck__Output { - /** - * Empty payloads imply a connect-only health check. 
- */ - 'send'?: (_envoy_api_v2_core_HealthCheck_Payload__Output); - /** - * When checking the response, “fuzzy” matching is performed such that each - * binary block must be found, and in the order specified, but not - * necessarily contiguous. - */ - 'receive': (_envoy_api_v2_core_HealthCheck_Payload__Output)[]; -} - -/** - * Health checks occur over the transport socket specified for the cluster. This implies that if a - * cluster is using a TLS-enabled transport socket, the health check will also occur over TLS. - * - * This allows overriding the cluster TLS settings, just for health check connections. - */ -export interface _envoy_api_v2_core_HealthCheck_TlsOptions { - /** - * Specifies the ALPN protocols for health check connections. This is useful if the - * corresponding upstream is using ALPN-based :ref:`FilterChainMatch - * ` along with different protocols for health checks - * versus data connections. If empty, no ALPN protocols will be set on health check connections. - */ - 'alpn_protocols'?: (string)[]; -} - -/** - * Health checks occur over the transport socket specified for the cluster. This implies that if a - * cluster is using a TLS-enabled transport socket, the health check will also occur over TLS. - * - * This allows overriding the cluster TLS settings, just for health check connections. - */ -export interface _envoy_api_v2_core_HealthCheck_TlsOptions__Output { - /** - * Specifies the ALPN protocols for health check connections. This is useful if the - * corresponding upstream is using ALPN-based :ref:`FilterChainMatch - * ` along with different protocols for health checks - * versus data connections. If empty, no ALPN protocols will be set on health check connections. - */ - 'alpn_protocols': (string)[]; -} - -/** - * [#next-free-field: 23] - */ -export interface HealthCheck { - /** - * The time to wait for a health check response. If the timeout is reached the - * health check attempt will be considered a failure. - */ - 'timeout'?: (_google_protobuf_Duration); - /** - * The interval between health checks. - */ - 'interval'?: (_google_protobuf_Duration); - /** - * An optional jitter amount in milliseconds. If specified, during every - * interval Envoy will add interval_jitter to the wait time. - */ - 'interval_jitter'?: (_google_protobuf_Duration); - /** - * The number of unhealthy health checks required before a host is marked - * unhealthy. Note that for *http* health checking if a host responds with 503 - * this threshold is ignored and the host is considered unhealthy immediately. - */ - 'unhealthy_threshold'?: (_google_protobuf_UInt32Value); - /** - * The number of healthy health checks required before a host is marked - * healthy. Note that during startup, only a single successful health check is - * required to mark a host healthy. - */ - 'healthy_threshold'?: (_google_protobuf_UInt32Value); - /** - * [#not-implemented-hide:] Non-serving port for health checking. - */ - 'alt_port'?: (_google_protobuf_UInt32Value); - /** - * Reuse health check connection between health checks. Default is true. - */ - 'reuse_connection'?: (_google_protobuf_BoolValue); - /** - * HTTP health check. - */ - 'http_health_check'?: (_envoy_api_v2_core_HealthCheck_HttpHealthCheck); - /** - * TCP health check. - */ - 'tcp_health_check'?: (_envoy_api_v2_core_HealthCheck_TcpHealthCheck); - /** - * gRPC health check. 
- */ - 'grpc_health_check'?: (_envoy_api_v2_core_HealthCheck_GrpcHealthCheck); - /** - * The "no traffic interval" is a special health check interval that is used when a cluster has - * never had traffic routed to it. This lower interval allows cluster information to be kept up to - * date, without sending a potentially large amount of active health checking traffic for no - * reason. Once a cluster has been used for traffic routing, Envoy will shift back to using the - * standard health check interval that is defined. Note that this interval takes precedence over - * any other. - * - * The default value for "no traffic interval" is 60 seconds. - */ - 'no_traffic_interval'?: (_google_protobuf_Duration); - /** - * Custom health check. - */ - 'custom_health_check'?: (_envoy_api_v2_core_HealthCheck_CustomHealthCheck); - /** - * The "unhealthy interval" is a health check interval that is used for hosts that are marked as - * unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the - * standard health check interval that is defined. - * - * The default value for "unhealthy interval" is the same as "interval". - */ - 'unhealthy_interval'?: (_google_protobuf_Duration); - /** - * The "unhealthy edge interval" is a special health check interval that is used for the first - * health check right after a host is marked as unhealthy. For subsequent health checks - * Envoy will shift back to using either "unhealthy interval" if present or the standard health - * check interval that is defined. - * - * The default value for "unhealthy edge interval" is the same as "unhealthy interval". - */ - 'unhealthy_edge_interval'?: (_google_protobuf_Duration); - /** - * The "healthy edge interval" is a special health check interval that is used for the first - * health check right after a host is marked as healthy. For subsequent health checks - * Envoy will shift back to using the standard health check interval that is defined. - * - * The default value for "healthy edge interval" is the same as the default interval. - */ - 'healthy_edge_interval'?: (_google_protobuf_Duration); - /** - * Specifies the path to the :ref:`health check event log `. - * If empty, no event log will be written. - */ - 'event_log_path'?: (string); - /** - * An optional jitter amount as a percentage of interval_ms. If specified, - * during every interval Envoy will add interval_ms * - * interval_jitter_percent / 100 to the wait time. - * - * If interval_jitter_ms and interval_jitter_percent are both set, both of - * them will be used to increase the wait time. - */ - 'interval_jitter_percent'?: (number); - /** - * If set to true, health check failure events will always be logged. If set to false, only the - * initial health check failure event will be logged. - * The default value is false. - */ - 'always_log_health_check_failures'?: (boolean); - /** - * An optional jitter amount in milliseconds. If specified, Envoy will start health - * checking after for a random time in ms between 0 and initial_jitter. This only - * applies to the first health check. - */ - 'initial_jitter'?: (_google_protobuf_Duration); - /** - * This allows overriding the cluster TLS settings, just for health check connections. - */ - 'tls_options'?: (_envoy_api_v2_core_HealthCheck_TlsOptions); - /** - * [#not-implemented-hide:] - * The gRPC service for the health check event service. - * If empty, health check events won't be sent to a remote endpoint. 
- */ - 'event_service'?: (_envoy_api_v2_core_EventServiceConfig); - 'health_checker'?: "http_health_check"|"tcp_health_check"|"grpc_health_check"|"custom_health_check"; -} - -/** - * [#next-free-field: 23] - */ -export interface HealthCheck__Output { - /** - * The time to wait for a health check response. If the timeout is reached the - * health check attempt will be considered a failure. - */ - 'timeout'?: (_google_protobuf_Duration__Output); - /** - * The interval between health checks. - */ - 'interval'?: (_google_protobuf_Duration__Output); - /** - * An optional jitter amount in milliseconds. If specified, during every - * interval Envoy will add interval_jitter to the wait time. - */ - 'interval_jitter'?: (_google_protobuf_Duration__Output); - /** - * The number of unhealthy health checks required before a host is marked - * unhealthy. Note that for *http* health checking if a host responds with 503 - * this threshold is ignored and the host is considered unhealthy immediately. - */ - 'unhealthy_threshold'?: (_google_protobuf_UInt32Value__Output); - /** - * The number of healthy health checks required before a host is marked - * healthy. Note that during startup, only a single successful health check is - * required to mark a host healthy. - */ - 'healthy_threshold'?: (_google_protobuf_UInt32Value__Output); - /** - * [#not-implemented-hide:] Non-serving port for health checking. - */ - 'alt_port'?: (_google_protobuf_UInt32Value__Output); - /** - * Reuse health check connection between health checks. Default is true. - */ - 'reuse_connection'?: (_google_protobuf_BoolValue__Output); - /** - * HTTP health check. - */ - 'http_health_check'?: (_envoy_api_v2_core_HealthCheck_HttpHealthCheck__Output); - /** - * TCP health check. - */ - 'tcp_health_check'?: (_envoy_api_v2_core_HealthCheck_TcpHealthCheck__Output); - /** - * gRPC health check. - */ - 'grpc_health_check'?: (_envoy_api_v2_core_HealthCheck_GrpcHealthCheck__Output); - /** - * The "no traffic interval" is a special health check interval that is used when a cluster has - * never had traffic routed to it. This lower interval allows cluster information to be kept up to - * date, without sending a potentially large amount of active health checking traffic for no - * reason. Once a cluster has been used for traffic routing, Envoy will shift back to using the - * standard health check interval that is defined. Note that this interval takes precedence over - * any other. - * - * The default value for "no traffic interval" is 60 seconds. - */ - 'no_traffic_interval'?: (_google_protobuf_Duration__Output); - /** - * Custom health check. - */ - 'custom_health_check'?: (_envoy_api_v2_core_HealthCheck_CustomHealthCheck__Output); - /** - * The "unhealthy interval" is a health check interval that is used for hosts that are marked as - * unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the - * standard health check interval that is defined. - * - * The default value for "unhealthy interval" is the same as "interval". - */ - 'unhealthy_interval'?: (_google_protobuf_Duration__Output); - /** - * The "unhealthy edge interval" is a special health check interval that is used for the first - * health check right after a host is marked as unhealthy. For subsequent health checks - * Envoy will shift back to using either "unhealthy interval" if present or the standard health - * check interval that is defined. - * - * The default value for "unhealthy edge interval" is the same as "unhealthy interval". 
- */ - 'unhealthy_edge_interval'?: (_google_protobuf_Duration__Output); - /** - * The "healthy edge interval" is a special health check interval that is used for the first - * health check right after a host is marked as healthy. For subsequent health checks - * Envoy will shift back to using the standard health check interval that is defined. - * - * The default value for "healthy edge interval" is the same as the default interval. - */ - 'healthy_edge_interval'?: (_google_protobuf_Duration__Output); - /** - * Specifies the path to the :ref:`health check event log `. - * If empty, no event log will be written. - */ - 'event_log_path': (string); - /** - * An optional jitter amount as a percentage of interval_ms. If specified, - * during every interval Envoy will add interval_ms * - * interval_jitter_percent / 100 to the wait time. - * - * If interval_jitter_ms and interval_jitter_percent are both set, both of - * them will be used to increase the wait time. - */ - 'interval_jitter_percent': (number); - /** - * If set to true, health check failure events will always be logged. If set to false, only the - * initial health check failure event will be logged. - * The default value is false. - */ - 'always_log_health_check_failures': (boolean); - /** - * An optional jitter amount in milliseconds. If specified, Envoy will start health - * checking after for a random time in ms between 0 and initial_jitter. This only - * applies to the first health check. - */ - 'initial_jitter'?: (_google_protobuf_Duration__Output); - /** - * This allows overriding the cluster TLS settings, just for health check connections. - */ - 'tls_options'?: (_envoy_api_v2_core_HealthCheck_TlsOptions__Output); - /** - * [#not-implemented-hide:] - * The gRPC service for the health check event service. - * If empty, health check events won't be sent to a remote endpoint. - */ - 'event_service'?: (_envoy_api_v2_core_EventServiceConfig__Output); - 'health_checker': "http_health_check"|"tcp_health_check"|"grpc_health_check"|"custom_health_check"; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/HealthStatus.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/core/HealthStatus.ts deleted file mode 100644 index e1d572fa4..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/HealthStatus.ts +++ /dev/null @@ -1,36 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/health_check.proto - -/** - * Endpoint health status. - */ -export enum HealthStatus { - /** - * The health status is not known. This is interpreted by Envoy as *HEALTHY*. - */ - UNKNOWN = 0, - /** - * Healthy. - */ - HEALTHY = 1, - /** - * Unhealthy. - */ - UNHEALTHY = 2, - /** - * Connection draining in progress. E.g., - * ``_ - * or - * ``_. - * This is interpreted by Envoy as *UNHEALTHY*. - */ - DRAINING = 3, - /** - * Health check timed out. This is part of HDS and is interpreted by Envoy as - * *UNHEALTHY*. - */ - TIMEOUT = 4, - /** - * Degraded. 
- */ - DEGRADED = 5, -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/Http1ProtocolOptions.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/core/Http1ProtocolOptions.ts deleted file mode 100644 index b9bb0ce54..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/Http1ProtocolOptions.ts +++ /dev/null @@ -1,119 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/protocol.proto - -import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../../google/protobuf/BoolValue'; - -export interface _envoy_api_v2_core_Http1ProtocolOptions_HeaderKeyFormat { - /** - * Formats the header by proper casing words: the first character and any character following - * a special character will be capitalized if it's an alpha character. For example, - * "content-type" becomes "Content-Type", and "foo$b#$are" becomes "Foo$B#$Are". - * Note that while this results in most headers following conventional casing, certain headers - * are not covered. For example, the "TE" header will be formatted as "Te". - */ - 'proper_case_words'?: (_envoy_api_v2_core_Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords); - 'header_format'?: "proper_case_words"; -} - -export interface _envoy_api_v2_core_Http1ProtocolOptions_HeaderKeyFormat__Output { - /** - * Formats the header by proper casing words: the first character and any character following - * a special character will be capitalized if it's an alpha character. For example, - * "content-type" becomes "Content-Type", and "foo$b#$are" becomes "Foo$B#$Are". - * Note that while this results in most headers following conventional casing, certain headers - * are not covered. For example, the "TE" header will be formatted as "Te". - */ - 'proper_case_words'?: (_envoy_api_v2_core_Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords__Output); - 'header_format': "proper_case_words"; -} - -export interface _envoy_api_v2_core_Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords { -} - -export interface _envoy_api_v2_core_Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords__Output { -} - -/** - * [#next-free-field: 6] - */ -export interface Http1ProtocolOptions { - /** - * Handle HTTP requests with absolute URLs in the requests. These requests - * are generally sent by clients to forward/explicit proxies. This allows clients to configure - * envoy as their HTTP proxy. In Unix, for example, this is typically done by setting the - * *http_proxy* environment variable. - */ - 'allow_absolute_url'?: (_google_protobuf_BoolValue); - /** - * Handle incoming HTTP/1.0 and HTTP 0.9 requests. - * This is off by default, and not fully standards compliant. There is support for pre-HTTP/1.1 - * style connect logic, dechunking, and handling lack of client host iff - * *default_host_for_http_10* is configured. - */ - 'accept_http_10'?: (boolean); - /** - * A default host for HTTP/1.0 requests. This is highly suggested if *accept_http_10* is true as - * Envoy does not otherwise support HTTP/1.0 without a Host header. - * This is a no-op if *accept_http_10* is not true. - */ - 'default_host_for_http_10'?: (string); - /** - * Describes how the keys for response headers should be formatted. By default, all header keys - * are lower cased. - */ - 'header_key_format'?: (_envoy_api_v2_core_Http1ProtocolOptions_HeaderKeyFormat); - /** - * Enables trailers for HTTP/1. By default the HTTP/1 codec drops proxied trailers. - * - * .. 
attention:: - * - * Note that this only happens when Envoy is chunk encoding which occurs when: - * - The request is HTTP/1.1. - * - Is neither a HEAD only request nor a HTTP Upgrade. - * - Not a response to a HEAD request. - * - The content length header is not present. - */ - 'enable_trailers'?: (boolean); -} - -/** - * [#next-free-field: 6] - */ -export interface Http1ProtocolOptions__Output { - /** - * Handle HTTP requests with absolute URLs in the requests. These requests - * are generally sent by clients to forward/explicit proxies. This allows clients to configure - * envoy as their HTTP proxy. In Unix, for example, this is typically done by setting the - * *http_proxy* environment variable. - */ - 'allow_absolute_url'?: (_google_protobuf_BoolValue__Output); - /** - * Handle incoming HTTP/1.0 and HTTP 0.9 requests. - * This is off by default, and not fully standards compliant. There is support for pre-HTTP/1.1 - * style connect logic, dechunking, and handling lack of client host iff - * *default_host_for_http_10* is configured. - */ - 'accept_http_10': (boolean); - /** - * A default host for HTTP/1.0 requests. This is highly suggested if *accept_http_10* is true as - * Envoy does not otherwise support HTTP/1.0 without a Host header. - * This is a no-op if *accept_http_10* is not true. - */ - 'default_host_for_http_10': (string); - /** - * Describes how the keys for response headers should be formatted. By default, all header keys - * are lower cased. - */ - 'header_key_format'?: (_envoy_api_v2_core_Http1ProtocolOptions_HeaderKeyFormat__Output); - /** - * Enables trailers for HTTP/1. By default the HTTP/1 codec drops proxied trailers. - * - * .. attention:: - * - * Note that this only happens when Envoy is chunk encoding which occurs when: - * - The request is HTTP/1.1. - * - Is neither a HEAD only request nor a HTTP Upgrade. - * - Not a response to a HEAD request. - * - The content length header is not present. - */ - 'enable_trailers': (boolean); -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/HttpProtocolOptions.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/core/HttpProtocolOptions.ts deleted file mode 100644 index 219fdb0c7..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/HttpProtocolOptions.ts +++ /dev/null @@ -1,126 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/protocol.proto - -import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../../google/protobuf/Duration'; -import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; - -// Original file: deps/envoy-api/envoy/api/v2/core/protocol.proto - -/** - * Action to take when Envoy receives client request with header names containing underscore - * characters. - * Underscore character is allowed in header names by the RFC-7230 and this behavior is implemented - * as a security measure due to systems that treat '_' and '-' as interchangeable. Envoy by default allows client request headers with underscore - * characters. - */ -export enum _envoy_api_v2_core_HttpProtocolOptions_HeadersWithUnderscoresAction { - /** - * Allow headers with underscores. This is the default behavior. - */ - ALLOW = 0, - /** - * Reject client request. HTTP/1 requests are rejected with the 400 status. HTTP/2 requests - * end with the stream reset. 
The "httpN.requests_rejected_with_underscores_in_headers" counter - * is incremented for each rejected request. - */ - REJECT_REQUEST = 1, - /** - * Drop the header with name containing underscores. The header is dropped before the filter chain is - * invoked and as such filters will not see dropped headers. The - * "httpN.dropped_headers_with_underscores" is incremented for each dropped header. - */ - DROP_HEADER = 2, -} - -/** - * [#next-free-field: 6] - */ -export interface HttpProtocolOptions { - /** - * The idle timeout for connections. The idle timeout is defined as the - * period in which there are no active requests. When the - * idle timeout is reached the connection will be closed. If the connection is an HTTP/2 - * downstream connection a drain sequence will occur prior to closing the connection, see - * :ref:`drain_timeout - * `. - * Note that request based timeouts mean that HTTP/2 PINGs will not keep the connection alive. - * If not specified, this defaults to 1 hour. To disable idle timeouts explicitly set this to 0. - * - * .. warning:: - * Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP - * FIN packets, etc. - */ - 'idle_timeout'?: (_google_protobuf_Duration); - /** - * The maximum number of headers. If unconfigured, the default - * maximum number of request headers allowed is 100. Requests that exceed this limit will receive - * a 431 response for HTTP/1.x and cause a stream reset for HTTP/2. - */ - 'max_headers_count'?: (_google_protobuf_UInt32Value); - /** - * The maximum duration of a connection. The duration is defined as a period since a connection - * was established. If not set, there is no max duration. When max_connection_duration is reached - * the connection will be closed. Drain sequence will occur prior to closing the connection if - * if's applicable. See :ref:`drain_timeout - * `. - * Note: not implemented for upstream connections. - */ - 'max_connection_duration'?: (_google_protobuf_Duration); - /** - * Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be - * reset independent of any other timeouts. If not specified, this value is not set. - */ - 'max_stream_duration'?: (_google_protobuf_Duration); - /** - * Action to take when a client request with a header name containing underscore characters is received. - * If this setting is not specified, the value defaults to ALLOW. - * Note: upstream responses are not affected by this setting. - */ - 'headers_with_underscores_action'?: (_envoy_api_v2_core_HttpProtocolOptions_HeadersWithUnderscoresAction | keyof typeof _envoy_api_v2_core_HttpProtocolOptions_HeadersWithUnderscoresAction); -} - -/** - * [#next-free-field: 6] - */ -export interface HttpProtocolOptions__Output { - /** - * The idle timeout for connections. The idle timeout is defined as the - * period in which there are no active requests. When the - * idle timeout is reached the connection will be closed. If the connection is an HTTP/2 - * downstream connection a drain sequence will occur prior to closing the connection, see - * :ref:`drain_timeout - * `. - * Note that request based timeouts mean that HTTP/2 PINGs will not keep the connection alive. - * If not specified, this defaults to 1 hour. To disable idle timeouts explicitly set this to 0. - * - * .. warning:: - * Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP - * FIN packets, etc. 
- */ - 'idle_timeout'?: (_google_protobuf_Duration__Output); - /** - * The maximum number of headers. If unconfigured, the default - * maximum number of request headers allowed is 100. Requests that exceed this limit will receive - * a 431 response for HTTP/1.x and cause a stream reset for HTTP/2. - */ - 'max_headers_count'?: (_google_protobuf_UInt32Value__Output); - /** - * The maximum duration of a connection. The duration is defined as a period since a connection - * was established. If not set, there is no max duration. When max_connection_duration is reached - * the connection will be closed. Drain sequence will occur prior to closing the connection if - * if's applicable. See :ref:`drain_timeout - * `. - * Note: not implemented for upstream connections. - */ - 'max_connection_duration'?: (_google_protobuf_Duration__Output); - /** - * Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be - * reset independent of any other timeouts. If not specified, this value is not set. - */ - 'max_stream_duration'?: (_google_protobuf_Duration__Output); - /** - * Action to take when a client request with a header name containing underscore characters is received. - * If this setting is not specified, the value defaults to ALLOW. - * Note: upstream responses are not affected by this setting. - */ - 'headers_with_underscores_action': (keyof typeof _envoy_api_v2_core_HttpProtocolOptions_HeadersWithUnderscoresAction); -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/RemoteDataSource.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/core/RemoteDataSource.ts deleted file mode 100644 index 93e722eef..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/RemoteDataSource.ts +++ /dev/null @@ -1,40 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/base.proto - -import type { HttpUri as _envoy_api_v2_core_HttpUri, HttpUri__Output as _envoy_api_v2_core_HttpUri__Output } from '../../../../envoy/api/v2/core/HttpUri'; -import type { RetryPolicy as _envoy_api_v2_core_RetryPolicy, RetryPolicy__Output as _envoy_api_v2_core_RetryPolicy__Output } from '../../../../envoy/api/v2/core/RetryPolicy'; - -/** - * The message specifies how to fetch data from remote and how to verify it. - */ -export interface RemoteDataSource { - /** - * The HTTP URI to fetch the remote data. - */ - 'http_uri'?: (_envoy_api_v2_core_HttpUri); - /** - * SHA256 string for verifying data. - */ - 'sha256'?: (string); - /** - * Retry policy for fetching remote data. - */ - 'retry_policy'?: (_envoy_api_v2_core_RetryPolicy); -} - -/** - * The message specifies how to fetch data from remote and how to verify it. - */ -export interface RemoteDataSource__Output { - /** - * The HTTP URI to fetch the remote data. - */ - 'http_uri'?: (_envoy_api_v2_core_HttpUri__Output); - /** - * SHA256 string for verifying data. - */ - 'sha256': (string); - /** - * Retry policy for fetching remote data. - */ - 'retry_policy'?: (_envoy_api_v2_core_RetryPolicy__Output); -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/RequestMethod.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/core/RequestMethod.ts deleted file mode 100644 index 029e9882d..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/RequestMethod.ts +++ /dev/null @@ -1,17 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/base.proto - -/** - * HTTP request method. 
- */ -export enum RequestMethod { - METHOD_UNSPECIFIED = 0, - GET = 1, - HEAD = 2, - POST = 3, - PUT = 4, - DELETE = 5, - CONNECT = 6, - OPTIONS = 7, - TRACE = 8, - PATCH = 9, -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/RoutingPriority.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/core/RoutingPriority.ts deleted file mode 100644 index 5937fceb2..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/RoutingPriority.ts +++ /dev/null @@ -1,15 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/base.proto - -/** - * Envoy supports :ref:`upstream priority routing - * ` both at the route and the virtual - * cluster level. The current priority implementation uses different connection - * pool and circuit breaking settings for each priority level. This means that - * even for HTTP/2 requests, two physical connections will be used to an - * upstream host. In the future Envoy will likely support true HTTP/2 priority - * over a single upstream connection. - */ -export enum RoutingPriority { - DEFAULT = 0, - HIGH = 1, -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/SelfConfigSource.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/core/SelfConfigSource.ts deleted file mode 100644 index 144cfdf5a..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/SelfConfigSource.ts +++ /dev/null @@ -1,20 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/config_source.proto - - -/** - * [#not-implemented-hide:] - * Self-referencing config source options. This is currently empty, but when - * set in :ref:`ConfigSource ` can be used to - * specify that other data can be obtained from the same server. - */ -export interface SelfConfigSource { -} - -/** - * [#not-implemented-hide:] - * Self-referencing config source options. This is currently empty, but when - * set in :ref:`ConfigSource ` can be used to - * specify that other data can be obtained from the same server. - */ -export interface SelfConfigSource__Output { -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/SocketOption.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/core/SocketOption.ts deleted file mode 100644 index 4a32e46b4..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/SocketOption.ts +++ /dev/null @@ -1,90 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/socket_option.proto - -import type { Long } from '@grpc/proto-loader'; - -// Original file: deps/envoy-api/envoy/api/v2/core/socket_option.proto - -export enum _envoy_api_v2_core_SocketOption_SocketState { - /** - * Socket options are applied after socket creation but before binding the socket to a port - */ - STATE_PREBIND = 0, - /** - * Socket options are applied after binding the socket to a port but before calling listen() - */ - STATE_BOUND = 1, - /** - * Socket options are applied after calling listen() - */ - STATE_LISTENING = 2, -} - -/** - * Generic socket option message. This would be used to set socket options that - * might not exist in upstream kernels or precompiled Envoy binaries. - * [#next-free-field: 7] - */ -export interface SocketOption { - /** - * An optional name to give this socket option for debugging, etc. - * Uniqueness is not required and no special meaning is assumed. 
- */ - 'description'?: (string); - /** - * Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP - */ - 'level'?: (number | string | Long); - /** - * The numeric name as passed to setsockopt - */ - 'name'?: (number | string | Long); - /** - * Because many sockopts take an int value. - */ - 'int_value'?: (number | string | Long); - /** - * Otherwise it's a byte buffer. - */ - 'buf_value'?: (Buffer | Uint8Array | string); - /** - * The state in which the option will be applied. When used in BindConfig - * STATE_PREBIND is currently the only valid value. - */ - 'state'?: (_envoy_api_v2_core_SocketOption_SocketState | keyof typeof _envoy_api_v2_core_SocketOption_SocketState); - 'value'?: "int_value"|"buf_value"; -} - -/** - * Generic socket option message. This would be used to set socket options that - * might not exist in upstream kernels or precompiled Envoy binaries. - * [#next-free-field: 7] - */ -export interface SocketOption__Output { - /** - * An optional name to give this socket option for debugging, etc. - * Uniqueness is not required and no special meaning is assumed. - */ - 'description': (string); - /** - * Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP - */ - 'level': (string); - /** - * The numeric name as passed to setsockopt - */ - 'name': (string); - /** - * Because many sockopts take an int value. - */ - 'int_value'?: (string); - /** - * Otherwise it's a byte buffer. - */ - 'buf_value'?: (Buffer); - /** - * The state in which the option will be applied. When used in BindConfig - * STATE_PREBIND is currently the only valid value. - */ - 'state': (keyof typeof _envoy_api_v2_core_SocketOption_SocketState); - 'value': "int_value"|"buf_value"; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/TrafficDirection.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/core/TrafficDirection.ts deleted file mode 100644 index 41cf36523..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/TrafficDirection.ts +++ /dev/null @@ -1,19 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/base.proto - -/** - * Identifies the direction of the traffic relative to the local Envoy. - */ -export enum TrafficDirection { - /** - * Default option is unspecified. - */ - UNSPECIFIED = 0, - /** - * The transport is used for incoming traffic. - */ - INBOUND = 1, - /** - * The transport is used for outgoing traffic. - */ - OUTBOUND = 2, -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/UpstreamHttpProtocolOptions.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/core/UpstreamHttpProtocolOptions.ts deleted file mode 100644 index 9c55560e8..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/UpstreamHttpProtocolOptions.ts +++ /dev/null @@ -1,34 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/protocol.proto - - -export interface UpstreamHttpProtocolOptions { - /** - * Set transport socket `SNI `_ for new - * upstream connections based on the downstream HTTP host/authority header, as seen by the - * :ref:`router filter `. - */ - 'auto_sni'?: (boolean); - /** - * Automatic validate upstream presented certificate for new upstream connections based on the - * downstream HTTP host/authority header, as seen by the - * :ref:`router filter `. - * This field is intended to set with `auto_sni` field. 
- */ - 'auto_san_validation'?: (boolean); -} - -export interface UpstreamHttpProtocolOptions__Output { - /** - * Set transport socket `SNI `_ for new - * upstream connections based on the downstream HTTP host/authority header, as seen by the - * :ref:`router filter `. - */ - 'auto_sni': (boolean); - /** - * Automatic validate upstream presented certificate for new upstream connections based on the - * downstream HTTP host/authority header, as seen by the - * :ref:`router filter `. - * This field is intended to set with `auto_sni` field. - */ - 'auto_san_validation': (boolean); -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/listener/Filter.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/listener/Filter.ts deleted file mode 100644 index 2c6e0d087..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/listener/Filter.ts +++ /dev/null @@ -1,34 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/listener/listener_components.proto - -import type { Struct as _google_protobuf_Struct, Struct__Output as _google_protobuf_Struct__Output } from '../../../../google/protobuf/Struct'; -import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; - -export interface Filter { - /** - * The name of the filter to instantiate. The name must match a - * :ref:`supported filter `. - */ - 'name'?: (string); - 'config'?: (_google_protobuf_Struct); - 'typed_config'?: (_google_protobuf_Any); - /** - * Filter specific configuration which depends on the filter being - * instantiated. See the supported filters for further documentation. - */ - 'config_type'?: "config"|"typed_config"; -} - -export interface Filter__Output { - /** - * The name of the filter to instantiate. The name must match a - * :ref:`supported filter `. - */ - 'name': (string); - 'config'?: (_google_protobuf_Struct__Output); - 'typed_config'?: (_google_protobuf_Any__Output); - /** - * Filter specific configuration which depends on the filter being - * instantiated. See the supported filters for further documentation. 
- */ - 'config_type': "config"|"typed_config"; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/listener/FilterChain.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/listener/FilterChain.ts deleted file mode 100644 index 1e98abe50..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/listener/FilterChain.ts +++ /dev/null @@ -1,118 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/listener/listener_components.proto - -import type { FilterChainMatch as _envoy_api_v2_listener_FilterChainMatch, FilterChainMatch__Output as _envoy_api_v2_listener_FilterChainMatch__Output } from '../../../../envoy/api/v2/listener/FilterChainMatch'; -import type { DownstreamTlsContext as _envoy_api_v2_auth_DownstreamTlsContext, DownstreamTlsContext__Output as _envoy_api_v2_auth_DownstreamTlsContext__Output } from '../../../../envoy/api/v2/auth/DownstreamTlsContext'; -import type { Filter as _envoy_api_v2_listener_Filter, Filter__Output as _envoy_api_v2_listener_Filter__Output } from '../../../../envoy/api/v2/listener/Filter'; -import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../../google/protobuf/BoolValue'; -import type { Metadata as _envoy_api_v2_core_Metadata, Metadata__Output as _envoy_api_v2_core_Metadata__Output } from '../../../../envoy/api/v2/core/Metadata'; -import type { TransportSocket as _envoy_api_v2_core_TransportSocket, TransportSocket__Output as _envoy_api_v2_core_TransportSocket__Output } from '../../../../envoy/api/v2/core/TransportSocket'; - -/** - * A filter chain wraps a set of match criteria, an option TLS context, a set of filters, and - * various other parameters. - * [#next-free-field: 8] - */ -export interface FilterChain { - /** - * The criteria to use when matching a connection to this filter chain. - */ - 'filter_chain_match'?: (_envoy_api_v2_listener_FilterChainMatch); - /** - * The TLS context for this filter chain. - * - * .. attention:: - * - * **This field is deprecated**. Use `transport_socket` with name `tls` instead. If both are - * set, `transport_socket` takes priority. - */ - 'tls_context'?: (_envoy_api_v2_auth_DownstreamTlsContext); - /** - * A list of individual network filters that make up the filter chain for - * connections established with the listener. Order matters as the filters are - * processed sequentially as connection events happen. Note: If the filter - * list is empty, the connection will close by default. - */ - 'filters'?: (_envoy_api_v2_listener_Filter)[]; - /** - * Whether the listener should expect a PROXY protocol V1 header on new - * connections. If this option is enabled, the listener will assume that that - * remote address of the connection is the one specified in the header. Some - * load balancers including the AWS ELB support this option. If the option is - * absent or set to false, Envoy will use the physical peer address of the - * connection as the remote address. - */ - 'use_proxy_proto'?: (_google_protobuf_BoolValue); - /** - * [#not-implemented-hide:] filter chain metadata. - */ - 'metadata'?: (_envoy_api_v2_core_Metadata); - /** - * Optional custom transport socket implementation to use for downstream connections. - * To setup TLS, set a transport socket with name `tls` and - * :ref:`DownstreamTlsContext ` in the `typed_config`. - * If no transport socket configuration is specified, new connections - * will be set up with plaintext. 
- */ - 'transport_socket'?: (_envoy_api_v2_core_TransportSocket); - /** - * [#not-implemented-hide:] The unique name (or empty) by which this filter chain is known. If no - * name is provided, Envoy will allocate an internal UUID for the filter chain. If the filter - * chain is to be dynamically updated or removed via FCDS a unique name must be provided. - */ - 'name'?: (string); -} - -/** - * A filter chain wraps a set of match criteria, an option TLS context, a set of filters, and - * various other parameters. - * [#next-free-field: 8] - */ -export interface FilterChain__Output { - /** - * The criteria to use when matching a connection to this filter chain. - */ - 'filter_chain_match'?: (_envoy_api_v2_listener_FilterChainMatch__Output); - /** - * The TLS context for this filter chain. - * - * .. attention:: - * - * **This field is deprecated**. Use `transport_socket` with name `tls` instead. If both are - * set, `transport_socket` takes priority. - */ - 'tls_context'?: (_envoy_api_v2_auth_DownstreamTlsContext__Output); - /** - * A list of individual network filters that make up the filter chain for - * connections established with the listener. Order matters as the filters are - * processed sequentially as connection events happen. Note: If the filter - * list is empty, the connection will close by default. - */ - 'filters': (_envoy_api_v2_listener_Filter__Output)[]; - /** - * Whether the listener should expect a PROXY protocol V1 header on new - * connections. If this option is enabled, the listener will assume that that - * remote address of the connection is the one specified in the header. Some - * load balancers including the AWS ELB support this option. If the option is - * absent or set to false, Envoy will use the physical peer address of the - * connection as the remote address. - */ - 'use_proxy_proto'?: (_google_protobuf_BoolValue__Output); - /** - * [#not-implemented-hide:] filter chain metadata. - */ - 'metadata'?: (_envoy_api_v2_core_Metadata__Output); - /** - * Optional custom transport socket implementation to use for downstream connections. - * To setup TLS, set a transport socket with name `tls` and - * :ref:`DownstreamTlsContext ` in the `typed_config`. - * If no transport socket configuration is specified, new connections - * will be set up with plaintext. - */ - 'transport_socket'?: (_envoy_api_v2_core_TransportSocket__Output); - /** - * [#not-implemented-hide:] The unique name (or empty) by which this filter chain is known. If no - * name is provided, Envoy will allocate an internal UUID for the filter chain. If the filter - * chain is to be dynamically updated or removed via FCDS a unique name must be provided. 
- */ - 'name': (string); -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/listener/ListenerFilter.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/listener/ListenerFilter.ts deleted file mode 100644 index 080d922b1..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/listener/ListenerFilter.ts +++ /dev/null @@ -1,47 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/listener/listener_components.proto - -import type { Struct as _google_protobuf_Struct, Struct__Output as _google_protobuf_Struct__Output } from '../../../../google/protobuf/Struct'; -import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; -import type { ListenerFilterChainMatchPredicate as _envoy_api_v2_listener_ListenerFilterChainMatchPredicate, ListenerFilterChainMatchPredicate__Output as _envoy_api_v2_listener_ListenerFilterChainMatchPredicate__Output } from '../../../../envoy/api/v2/listener/ListenerFilterChainMatchPredicate'; - -export interface ListenerFilter { - /** - * The name of the filter to instantiate. The name must match a - * :ref:`supported filter `. - */ - 'name'?: (string); - 'config'?: (_google_protobuf_Struct); - 'typed_config'?: (_google_protobuf_Any); - /** - * Optional match predicate used to disable the filter. The filter is enabled when this field is empty. - * See :ref:`ListenerFilterChainMatchPredicate ` - * for further examples. - */ - 'filter_disabled'?: (_envoy_api_v2_listener_ListenerFilterChainMatchPredicate); - /** - * Filter specific configuration which depends on the filter being instantiated. - * See the supported filters for further documentation. - */ - 'config_type'?: "config"|"typed_config"; -} - -export interface ListenerFilter__Output { - /** - * The name of the filter to instantiate. The name must match a - * :ref:`supported filter `. - */ - 'name': (string); - 'config'?: (_google_protobuf_Struct__Output); - 'typed_config'?: (_google_protobuf_Any__Output); - /** - * Optional match predicate used to disable the filter. The filter is enabled when this field is empty. - * See :ref:`ListenerFilterChainMatchPredicate ` - * for further examples. - */ - 'filter_disabled'?: (_envoy_api_v2_listener_ListenerFilterChainMatchPredicate__Output); - /** - * Filter specific configuration which depends on the filter being instantiated. - * See the supported filters for further documentation. - */ - 'config_type': "config"|"typed_config"; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/listener/UdpListenerConfig.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/listener/UdpListenerConfig.ts deleted file mode 100644 index 2299c5719..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/listener/UdpListenerConfig.ts +++ /dev/null @@ -1,36 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/listener/udp_listener_config.proto - -import type { Struct as _google_protobuf_Struct, Struct__Output as _google_protobuf_Struct__Output } from '../../../../google/protobuf/Struct'; -import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; - -export interface UdpListenerConfig { - /** - * Used to look up UDP listener factory, matches "raw_udp_listener" or - * "quic_listener" to create a specific udp listener. - * If not specified, treat as "raw_udp_listener". 
- */ - 'udp_listener_name'?: (string); - 'config'?: (_google_protobuf_Struct); - 'typed_config'?: (_google_protobuf_Any); - /** - * Used to create a specific listener factory. To some factory, e.g. - * "raw_udp_listener", config is not needed. - */ - 'config_type'?: "config"|"typed_config"; -} - -export interface UdpListenerConfig__Output { - /** - * Used to look up UDP listener factory, matches "raw_udp_listener" or - * "quic_listener" to create a specific udp listener. - * If not specified, treat as "raw_udp_listener". - */ - 'udp_listener_name': (string); - 'config'?: (_google_protobuf_Struct__Output); - 'typed_config'?: (_google_protobuf_Any__Output); - /** - * Used to create a specific listener factory. To some factory, e.g. - * "raw_udp_listener", config is not needed. - */ - 'config_type': "config"|"typed_config"; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/CorsPolicy.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/route/CorsPolicy.ts deleted file mode 100644 index 7b76b9d85..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/CorsPolicy.ts +++ /dev/null @@ -1,169 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/route/route_components.proto - -import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../../google/protobuf/BoolValue'; -import type { RuntimeFractionalPercent as _envoy_api_v2_core_RuntimeFractionalPercent, RuntimeFractionalPercent__Output as _envoy_api_v2_core_RuntimeFractionalPercent__Output } from '../../../../envoy/api/v2/core/RuntimeFractionalPercent'; -import type { StringMatcher as _envoy_type_matcher_StringMatcher, StringMatcher__Output as _envoy_type_matcher_StringMatcher__Output } from '../../../../envoy/type/matcher/StringMatcher'; - -/** - * [#next-free-field: 12] - */ -export interface CorsPolicy { - /** - * Specifies the origins that will be allowed to do CORS requests. - * - * An origin is allowed if either allow_origin or allow_origin_regex match. - * - * .. attention:: - * This field has been deprecated in favor of `allow_origin_string_match`. - */ - 'allow_origin'?: (string)[]; - /** - * Specifies the content for the *access-control-allow-methods* header. - */ - 'allow_methods'?: (string); - /** - * Specifies the content for the *access-control-allow-headers* header. - */ - 'allow_headers'?: (string); - /** - * Specifies the content for the *access-control-expose-headers* header. - */ - 'expose_headers'?: (string); - /** - * Specifies the content for the *access-control-max-age* header. - */ - 'max_age'?: (string); - /** - * Specifies whether the resource allows credentials. - */ - 'allow_credentials'?: (_google_protobuf_BoolValue); - /** - * Specifies if the CORS filter is enabled. Defaults to true. Only effective on route. - * - * .. attention:: - * - * **This field is deprecated**. Set the - * :ref:`filter_enabled` field instead. - */ - 'enabled'?: (_google_protobuf_BoolValue); - /** - * Specifies regex patterns that match allowed origins. - * - * An origin is allowed if either allow_origin or allow_origin_regex match. - * - * .. attention:: - * This field has been deprecated in favor of `allow_origin_string_match` as it is not safe for - * use with untrusted input in all cases. - */ - 'allow_origin_regex'?: (string)[]; - /** - * Specifies the % of requests for which the CORS filter is enabled. 
- * - * If neither ``enabled``, ``filter_enabled``, nor ``shadow_enabled`` are specified, the CORS - * filter will be enabled for 100% of the requests. - * - * If :ref:`runtime_key ` is - * specified, Envoy will lookup the runtime key to get the percentage of requests to filter. - */ - 'filter_enabled'?: (_envoy_api_v2_core_RuntimeFractionalPercent); - /** - * Specifies the % of requests for which the CORS policies will be evaluated and tracked, but not - * enforced. - * - * This field is intended to be used when ``filter_enabled`` and ``enabled`` are off. One of those - * fields have to explicitly disable the filter in order for this setting to take effect. - * - * If :ref:`runtime_key ` is specified, - * Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate - * and track the request's *Origin* to determine if it's valid but will not enforce any policies. - */ - 'shadow_enabled'?: (_envoy_api_v2_core_RuntimeFractionalPercent); - /** - * Specifies string patterns that match allowed origins. An origin is allowed if any of the - * string matchers match. - */ - 'allow_origin_string_match'?: (_envoy_type_matcher_StringMatcher)[]; - 'enabled_specifier'?: "enabled"|"filter_enabled"; -} - -/** - * [#next-free-field: 12] - */ -export interface CorsPolicy__Output { - /** - * Specifies the origins that will be allowed to do CORS requests. - * - * An origin is allowed if either allow_origin or allow_origin_regex match. - * - * .. attention:: - * This field has been deprecated in favor of `allow_origin_string_match`. - */ - 'allow_origin': (string)[]; - /** - * Specifies the content for the *access-control-allow-methods* header. - */ - 'allow_methods': (string); - /** - * Specifies the content for the *access-control-allow-headers* header. - */ - 'allow_headers': (string); - /** - * Specifies the content for the *access-control-expose-headers* header. - */ - 'expose_headers': (string); - /** - * Specifies the content for the *access-control-max-age* header. - */ - 'max_age': (string); - /** - * Specifies whether the resource allows credentials. - */ - 'allow_credentials'?: (_google_protobuf_BoolValue__Output); - /** - * Specifies if the CORS filter is enabled. Defaults to true. Only effective on route. - * - * .. attention:: - * - * **This field is deprecated**. Set the - * :ref:`filter_enabled` field instead. - */ - 'enabled'?: (_google_protobuf_BoolValue__Output); - /** - * Specifies regex patterns that match allowed origins. - * - * An origin is allowed if either allow_origin or allow_origin_regex match. - * - * .. attention:: - * This field has been deprecated in favor of `allow_origin_string_match` as it is not safe for - * use with untrusted input in all cases. - */ - 'allow_origin_regex': (string)[]; - /** - * Specifies the % of requests for which the CORS filter is enabled. - * - * If neither ``enabled``, ``filter_enabled``, nor ``shadow_enabled`` are specified, the CORS - * filter will be enabled for 100% of the requests. - * - * If :ref:`runtime_key ` is - * specified, Envoy will lookup the runtime key to get the percentage of requests to filter. - */ - 'filter_enabled'?: (_envoy_api_v2_core_RuntimeFractionalPercent__Output); - /** - * Specifies the % of requests for which the CORS policies will be evaluated and tracked, but not - * enforced. - * - * This field is intended to be used when ``filter_enabled`` and ``enabled`` are off. One of those - * fields have to explicitly disable the filter in order for this setting to take effect. 
- * - * If :ref:`runtime_key ` is specified, - * Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate - * and track the request's *Origin* to determine if it's valid but will not enforce any policies. - */ - 'shadow_enabled'?: (_envoy_api_v2_core_RuntimeFractionalPercent__Output); - /** - * Specifies string patterns that match allowed origins. An origin is allowed if any of the - * string matchers match. - */ - 'allow_origin_string_match': (_envoy_type_matcher_StringMatcher__Output)[]; - 'enabled_specifier': "enabled"|"filter_enabled"; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/DirectResponseAction.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/route/DirectResponseAction.ts deleted file mode 100644 index 83777f9e3..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/DirectResponseAction.ts +++ /dev/null @@ -1,39 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/route/route_components.proto - -import type { DataSource as _envoy_api_v2_core_DataSource, DataSource__Output as _envoy_api_v2_core_DataSource__Output } from '../../../../envoy/api/v2/core/DataSource'; - -export interface DirectResponseAction { - /** - * Specifies the HTTP response status to be returned. - */ - 'status'?: (number); - /** - * Specifies the content of the response body. If this setting is omitted, - * no body is included in the generated response. - * - * .. note:: - * - * Headers can be specified using *response_headers_to_add* in the enclosing - * :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_RouteConfiguration` or - * :ref:`envoy_api_msg_route.VirtualHost`. - */ - 'body'?: (_envoy_api_v2_core_DataSource); -} - -export interface DirectResponseAction__Output { - /** - * Specifies the HTTP response status to be returned. - */ - 'status': (number); - /** - * Specifies the content of the response body. If this setting is omitted, - * no body is included in the generated response. - * - * .. note:: - * - * Headers can be specified using *response_headers_to_add* in the enclosing - * :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_RouteConfiguration` or - * :ref:`envoy_api_msg_route.VirtualHost`. - */ - 'body'?: (_envoy_api_v2_core_DataSource__Output); -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/HeaderMatcher.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/route/HeaderMatcher.ts deleted file mode 100644 index 347849901..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/HeaderMatcher.ts +++ /dev/null @@ -1,227 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/route/route_components.proto - -import type { Int64Range as _envoy_type_Int64Range, Int64Range__Output as _envoy_type_Int64Range__Output } from '../../../../envoy/type/Int64Range'; -import type { RegexMatcher as _envoy_type_matcher_RegexMatcher, RegexMatcher__Output as _envoy_type_matcher_RegexMatcher__Output } from '../../../../envoy/type/matcher/RegexMatcher'; -import type { Long } from '@grpc/proto-loader'; - -/** - * .. attention:: - * - * Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 *Host* - * header. Thus, if attempting to match on *Host*, match on *:authority* instead. - * - * .. attention:: - * - * To route on HTTP method, use the special HTTP/2 *:method* header. This works for both - * HTTP/1 and HTTP/2 as Envoy normalizes headers. E.g., - * - * .. code-block:: json - * - * { - * "name": ":method", - * "exact_match": "POST" - * } - * - * .. 
attention:: - * In the absence of any header match specifier, match will default to :ref:`present_match - * `. i.e, a request that has the :ref:`name - * ` header will match, regardless of the header's - * value. - * - * [#next-major-version: HeaderMatcher should be refactored to use StringMatcher.] - * [#next-free-field: 12] - */ -export interface HeaderMatcher { - /** - * Specifies the name of the header in the request. - */ - 'name'?: (string); - /** - * If specified, header match will be performed based on the value of the header. - */ - 'exact_match'?: (string); - /** - * If specified, this regex string is a regular expression rule which implies the entire request - * header value must match the regex. The rule will not match if only a subsequence of the - * request header value matches the regex. The regex grammar used in the value field is defined - * `here `_. - * - * Examples: - * - * * The regex ``\d{3}`` matches the value *123* - * * The regex ``\d{3}`` does not match the value *1234* - * * The regex ``\d{3}`` does not match the value *123.456* - * - * .. attention:: - * This field has been deprecated in favor of `safe_regex_match` as it is not safe for use - * with untrusted input in all cases. - */ - 'regex_match'?: (string); - /** - * If specified, header match will be performed based on range. - * The rule will match if the request header value is within this range. - * The entire request header value must represent an integer in base 10 notation: consisting of - * an optional plus or minus sign followed by a sequence of digits. The rule will not match if - * the header value does not represent an integer. Match will fail for empty values, floating - * point numbers or if only a subsequence of the header value is an integer. - * - * Examples: - * - * * For range [-10,0), route will match for header value -1, but not for 0, "somestring", 10.9, - * "-1somestring" - */ - 'range_match'?: (_envoy_type_Int64Range); - /** - * If specified, header match will be performed based on whether the header is in the - * request. - */ - 'present_match'?: (boolean); - /** - * If specified, the match result will be inverted before checking. Defaults to false. - * - * Examples: - * - * * The regex ``\d{3}`` does not match the value *1234*, so it will match when inverted. - * * The range [-10,0) will match the value -1, so it will not match when inverted. - */ - 'invert_match'?: (boolean); - /** - * If specified, header match will be performed based on the prefix of the header value. - * Note: empty prefix is not allowed, please use present_match instead. - * - * Examples: - * - * * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*. - */ - 'prefix_match'?: (string); - /** - * If specified, header match will be performed based on the suffix of the header value. - * Note: empty suffix is not allowed, please use present_match instead. - * - * Examples: - * - * * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*. - */ - 'suffix_match'?: (string); - /** - * If specified, this regex string is a regular expression rule which implies the entire request - * header value must match the regex. The rule will not match if only a subsequence of the - * request header value matches the regex. - */ - 'safe_regex_match'?: (_envoy_type_matcher_RegexMatcher); - /** - * Specifies how the header match will be performed to route the request. 
- */ - 'header_match_specifier'?: "exact_match"|"regex_match"|"safe_regex_match"|"range_match"|"present_match"|"prefix_match"|"suffix_match"; -} - -/** - * .. attention:: - * - * Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 *Host* - * header. Thus, if attempting to match on *Host*, match on *:authority* instead. - * - * .. attention:: - * - * To route on HTTP method, use the special HTTP/2 *:method* header. This works for both - * HTTP/1 and HTTP/2 as Envoy normalizes headers. E.g., - * - * .. code-block:: json - * - * { - * "name": ":method", - * "exact_match": "POST" - * } - * - * .. attention:: - * In the absence of any header match specifier, match will default to :ref:`present_match - * `. i.e, a request that has the :ref:`name - * ` header will match, regardless of the header's - * value. - * - * [#next-major-version: HeaderMatcher should be refactored to use StringMatcher.] - * [#next-free-field: 12] - */ -export interface HeaderMatcher__Output { - /** - * Specifies the name of the header in the request. - */ - 'name': (string); - /** - * If specified, header match will be performed based on the value of the header. - */ - 'exact_match'?: (string); - /** - * If specified, this regex string is a regular expression rule which implies the entire request - * header value must match the regex. The rule will not match if only a subsequence of the - * request header value matches the regex. The regex grammar used in the value field is defined - * `here `_. - * - * Examples: - * - * * The regex ``\d{3}`` matches the value *123* - * * The regex ``\d{3}`` does not match the value *1234* - * * The regex ``\d{3}`` does not match the value *123.456* - * - * .. attention:: - * This field has been deprecated in favor of `safe_regex_match` as it is not safe for use - * with untrusted input in all cases. - */ - 'regex_match'?: (string); - /** - * If specified, header match will be performed based on range. - * The rule will match if the request header value is within this range. - * The entire request header value must represent an integer in base 10 notation: consisting of - * an optional plus or minus sign followed by a sequence of digits. The rule will not match if - * the header value does not represent an integer. Match will fail for empty values, floating - * point numbers or if only a subsequence of the header value is an integer. - * - * Examples: - * - * * For range [-10,0), route will match for header value -1, but not for 0, "somestring", 10.9, - * "-1somestring" - */ - 'range_match'?: (_envoy_type_Int64Range__Output); - /** - * If specified, header match will be performed based on whether the header is in the - * request. - */ - 'present_match'?: (boolean); - /** - * If specified, the match result will be inverted before checking. Defaults to false. - * - * Examples: - * - * * The regex ``\d{3}`` does not match the value *1234*, so it will match when inverted. - * * The range [-10,0) will match the value -1, so it will not match when inverted. - */ - 'invert_match': (boolean); - /** - * If specified, header match will be performed based on the prefix of the header value. - * Note: empty prefix is not allowed, please use present_match instead. - * - * Examples: - * - * * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*. - */ - 'prefix_match'?: (string); - /** - * If specified, header match will be performed based on the suffix of the header value. - * Note: empty suffix is not allowed, please use present_match instead. 
- * - * Examples: - * - * * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*. - */ - 'suffix_match'?: (string); - /** - * If specified, this regex string is a regular expression rule which implies the entire request - * header value must match the regex. The rule will not match if only a subsequence of the - * request header value matches the regex. - */ - 'safe_regex_match'?: (_envoy_type_matcher_RegexMatcher__Output); - /** - * Specifies how the header match will be performed to route the request. - */ - 'header_match_specifier': "exact_match"|"regex_match"|"safe_regex_match"|"range_match"|"present_match"|"prefix_match"|"suffix_match"; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/HedgePolicy.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/route/HedgePolicy.ts deleted file mode 100644 index 8134fc359..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/HedgePolicy.ts +++ /dev/null @@ -1,66 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/route/route_components.proto - -import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; -import type { FractionalPercent as _envoy_type_FractionalPercent, FractionalPercent__Output as _envoy_type_FractionalPercent__Output } from '../../../../envoy/type/FractionalPercent'; - -/** - * HTTP request hedging :ref:`architecture overview `. - */ -export interface HedgePolicy { - /** - * Specifies the number of initial requests that should be sent upstream. - * Must be at least 1. - * Defaults to 1. - * [#not-implemented-hide:] - */ - 'initial_requests'?: (_google_protobuf_UInt32Value); - /** - * Specifies a probability that an additional upstream request should be sent - * on top of what is specified by initial_requests. - * Defaults to 0. - * [#not-implemented-hide:] - */ - 'additional_request_chance'?: (_envoy_type_FractionalPercent); - /** - * Indicates that a hedged request should be sent when the per-try timeout - * is hit. This will only occur if the retry policy also indicates that a - * timed out request should be retried. - * Once a timed out request is retried due to per try timeout, the router - * filter will ensure that it is not retried again even if the returned - * response headers would otherwise be retried according the specified - * :ref:`RetryPolicy `. - * Defaults to false. - */ - 'hedge_on_per_try_timeout'?: (boolean); -} - -/** - * HTTP request hedging :ref:`architecture overview `. - */ -export interface HedgePolicy__Output { - /** - * Specifies the number of initial requests that should be sent upstream. - * Must be at least 1. - * Defaults to 1. - * [#not-implemented-hide:] - */ - 'initial_requests'?: (_google_protobuf_UInt32Value__Output); - /** - * Specifies a probability that an additional upstream request should be sent - * on top of what is specified by initial_requests. - * Defaults to 0. - * [#not-implemented-hide:] - */ - 'additional_request_chance'?: (_envoy_type_FractionalPercent__Output); - /** - * Indicates that a hedged request should be sent when the per-try timeout - * is hit. This will only occur if the retry policy also indicates that a - * timed out request should be retried. - * Once a timed out request is retried due to per try timeout, the router - * filter will ensure that it is not retried again even if the returned - * response headers would otherwise be retried according the specified - * :ref:`RetryPolicy `. 
- * Defaults to false. - */ - 'hedge_on_per_try_timeout': (boolean); -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/QueryParameterMatcher.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/route/QueryParameterMatcher.ts deleted file mode 100644 index 68f4fbcaa..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/QueryParameterMatcher.ts +++ /dev/null @@ -1,86 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/route/route_components.proto - -import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../../google/protobuf/BoolValue'; -import type { StringMatcher as _envoy_type_matcher_StringMatcher, StringMatcher__Output as _envoy_type_matcher_StringMatcher__Output } from '../../../../envoy/type/matcher/StringMatcher'; - -/** - * Query parameter matching treats the query string of a request's :path header - * as an ampersand-separated list of keys and/or key=value elements. - * [#next-free-field: 7] - */ -export interface QueryParameterMatcher { - /** - * Specifies the name of a key that must be present in the requested - * *path*'s query string. - */ - 'name'?: (string); - /** - * Specifies the value of the key. If the value is absent, a request - * that contains the key in its query string will match, whether the - * key appears with a value (e.g., "?debug=true") or not (e.g., "?debug") - * - * ..attention:: - * This field is deprecated. Use an `exact` match inside the `string_match` field. - */ - 'value'?: (string); - /** - * Specifies whether the query parameter value is a regular expression. - * Defaults to false. The entire query parameter value (i.e., the part to - * the right of the equals sign in "key=value") must match the regex. - * E.g., the regex ``\d+$`` will match *123* but not *a123* or *123a*. - * - * ..attention:: - * This field is deprecated. Use a `safe_regex` match inside the `string_match` field. - */ - 'regex'?: (_google_protobuf_BoolValue); - /** - * Specifies whether a query parameter value should match against a string. - */ - 'string_match'?: (_envoy_type_matcher_StringMatcher); - /** - * Specifies whether a query parameter should be present. - */ - 'present_match'?: (boolean); - 'query_parameter_match_specifier'?: "string_match"|"present_match"; -} - -/** - * Query parameter matching treats the query string of a request's :path header - * as an ampersand-separated list of keys and/or key=value elements. - * [#next-free-field: 7] - */ -export interface QueryParameterMatcher__Output { - /** - * Specifies the name of a key that must be present in the requested - * *path*'s query string. - */ - 'name': (string); - /** - * Specifies the value of the key. If the value is absent, a request - * that contains the key in its query string will match, whether the - * key appears with a value (e.g., "?debug=true") or not (e.g., "?debug") - * - * ..attention:: - * This field is deprecated. Use an `exact` match inside the `string_match` field. - */ - 'value': (string); - /** - * Specifies whether the query parameter value is a regular expression. - * Defaults to false. The entire query parameter value (i.e., the part to - * the right of the equals sign in "key=value") must match the regex. - * E.g., the regex ``\d+$`` will match *123* but not *a123* or *123a*. - * - * ..attention:: - * This field is deprecated. Use a `safe_regex` match inside the `string_match` field. 
- */ - 'regex'?: (_google_protobuf_BoolValue__Output); - /** - * Specifies whether a query parameter value should match against a string. - */ - 'string_match'?: (_envoy_type_matcher_StringMatcher__Output); - /** - * Specifies whether a query parameter should be present. - */ - 'present_match'?: (boolean); - 'query_parameter_match_specifier': "string_match"|"present_match"; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/RateLimit.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/route/RateLimit.ts deleted file mode 100644 index 998d94e53..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/RateLimit.ts +++ /dev/null @@ -1,341 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/route/route_components.proto - -import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; -import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../../google/protobuf/BoolValue'; -import type { HeaderMatcher as _envoy_api_v2_route_HeaderMatcher, HeaderMatcher__Output as _envoy_api_v2_route_HeaderMatcher__Output } from '../../../../envoy/api/v2/route/HeaderMatcher'; - -/** - * [#next-free-field: 7] - */ -export interface _envoy_api_v2_route_RateLimit_Action { - /** - * Rate limit on source cluster. - */ - 'source_cluster'?: (_envoy_api_v2_route_RateLimit_Action_SourceCluster); - /** - * Rate limit on destination cluster. - */ - 'destination_cluster'?: (_envoy_api_v2_route_RateLimit_Action_DestinationCluster); - /** - * Rate limit on request headers. - */ - 'request_headers'?: (_envoy_api_v2_route_RateLimit_Action_RequestHeaders); - /** - * Rate limit on remote address. - */ - 'remote_address'?: (_envoy_api_v2_route_RateLimit_Action_RemoteAddress); - /** - * Rate limit on a generic key. - */ - 'generic_key'?: (_envoy_api_v2_route_RateLimit_Action_GenericKey); - /** - * Rate limit on the existence of request headers. - */ - 'header_value_match'?: (_envoy_api_v2_route_RateLimit_Action_HeaderValueMatch); - 'action_specifier'?: "source_cluster"|"destination_cluster"|"request_headers"|"remote_address"|"generic_key"|"header_value_match"; -} - -/** - * [#next-free-field: 7] - */ -export interface _envoy_api_v2_route_RateLimit_Action__Output { - /** - * Rate limit on source cluster. - */ - 'source_cluster'?: (_envoy_api_v2_route_RateLimit_Action_SourceCluster__Output); - /** - * Rate limit on destination cluster. - */ - 'destination_cluster'?: (_envoy_api_v2_route_RateLimit_Action_DestinationCluster__Output); - /** - * Rate limit on request headers. - */ - 'request_headers'?: (_envoy_api_v2_route_RateLimit_Action_RequestHeaders__Output); - /** - * Rate limit on remote address. - */ - 'remote_address'?: (_envoy_api_v2_route_RateLimit_Action_RemoteAddress__Output); - /** - * Rate limit on a generic key. - */ - 'generic_key'?: (_envoy_api_v2_route_RateLimit_Action_GenericKey__Output); - /** - * Rate limit on the existence of request headers. - */ - 'header_value_match'?: (_envoy_api_v2_route_RateLimit_Action_HeaderValueMatch__Output); - 'action_specifier': "source_cluster"|"destination_cluster"|"request_headers"|"remote_address"|"generic_key"|"header_value_match"; -} - -/** - * The following descriptor entry is appended to the descriptor: - * - * .. 
code-block:: cpp - * - * ("destination_cluster", "") - * - * Once a request matches against a route table rule, a routed cluster is determined by one of - * the following :ref:`route table configuration ` - * settings: - * - * * :ref:`cluster ` indicates the upstream cluster - * to route to. - * * :ref:`weighted_clusters ` - * chooses a cluster randomly from a set of clusters with attributed weight. - * * :ref:`cluster_header ` indicates which - * header in the request contains the target cluster. - */ -export interface _envoy_api_v2_route_RateLimit_Action_DestinationCluster { -} - -/** - * The following descriptor entry is appended to the descriptor: - * - * .. code-block:: cpp - * - * ("destination_cluster", "") - * - * Once a request matches against a route table rule, a routed cluster is determined by one of - * the following :ref:`route table configuration ` - * settings: - * - * * :ref:`cluster ` indicates the upstream cluster - * to route to. - * * :ref:`weighted_clusters ` - * chooses a cluster randomly from a set of clusters with attributed weight. - * * :ref:`cluster_header ` indicates which - * header in the request contains the target cluster. - */ -export interface _envoy_api_v2_route_RateLimit_Action_DestinationCluster__Output { -} - -/** - * The following descriptor entry is appended to the descriptor: - * - * .. code-block:: cpp - * - * ("generic_key", "") - */ -export interface _envoy_api_v2_route_RateLimit_Action_GenericKey { - /** - * The value to use in the descriptor entry. - */ - 'descriptor_value'?: (string); -} - -/** - * The following descriptor entry is appended to the descriptor: - * - * .. code-block:: cpp - * - * ("generic_key", "") - */ -export interface _envoy_api_v2_route_RateLimit_Action_GenericKey__Output { - /** - * The value to use in the descriptor entry. - */ - 'descriptor_value': (string); -} - -/** - * The following descriptor entry is appended to the descriptor: - * - * .. code-block:: cpp - * - * ("header_match", "") - */ -export interface _envoy_api_v2_route_RateLimit_Action_HeaderValueMatch { - /** - * The value to use in the descriptor entry. - */ - 'descriptor_value'?: (string); - /** - * If set to true, the action will append a descriptor entry when the - * request matches the headers. If set to false, the action will append a - * descriptor entry when the request does not match the headers. The - * default value is true. - */ - 'expect_match'?: (_google_protobuf_BoolValue); - /** - * Specifies a set of headers that the rate limit action should match - * on. The action will check the request’s headers against all the - * specified headers in the config. A match will happen if all the - * headers in the config are present in the request with the same values - * (or based on presence if the value field is not in the config). - */ - 'headers'?: (_envoy_api_v2_route_HeaderMatcher)[]; -} - -/** - * The following descriptor entry is appended to the descriptor: - * - * .. code-block:: cpp - * - * ("header_match", "") - */ -export interface _envoy_api_v2_route_RateLimit_Action_HeaderValueMatch__Output { - /** - * The value to use in the descriptor entry. - */ - 'descriptor_value': (string); - /** - * If set to true, the action will append a descriptor entry when the - * request matches the headers. If set to false, the action will append a - * descriptor entry when the request does not match the headers. The - * default value is true. 
- */ - 'expect_match'?: (_google_protobuf_BoolValue__Output); - /** - * Specifies a set of headers that the rate limit action should match - * on. The action will check the request’s headers against all the - * specified headers in the config. A match will happen if all the - * headers in the config are present in the request with the same values - * (or based on presence if the value field is not in the config). - */ - 'headers': (_envoy_api_v2_route_HeaderMatcher__Output)[]; -} - -/** - * The following descriptor entry is appended to the descriptor and is populated using the - * trusted address from :ref:`x-forwarded-for `: - * - * .. code-block:: cpp - * - * ("remote_address", "") - */ -export interface _envoy_api_v2_route_RateLimit_Action_RemoteAddress { -} - -/** - * The following descriptor entry is appended to the descriptor and is populated using the - * trusted address from :ref:`x-forwarded-for `: - * - * .. code-block:: cpp - * - * ("remote_address", "") - */ -export interface _envoy_api_v2_route_RateLimit_Action_RemoteAddress__Output { -} - -/** - * The following descriptor entry is appended when a header contains a key that matches the - * *header_name*: - * - * .. code-block:: cpp - * - * ("", "") - */ -export interface _envoy_api_v2_route_RateLimit_Action_RequestHeaders { - /** - * The header name to be queried from the request headers. The header’s - * value is used to populate the value of the descriptor entry for the - * descriptor_key. - */ - 'header_name'?: (string); - /** - * The key to use in the descriptor entry. - */ - 'descriptor_key'?: (string); -} - -/** - * The following descriptor entry is appended when a header contains a key that matches the - * *header_name*: - * - * .. code-block:: cpp - * - * ("", "") - */ -export interface _envoy_api_v2_route_RateLimit_Action_RequestHeaders__Output { - /** - * The header name to be queried from the request headers. The header’s - * value is used to populate the value of the descriptor entry for the - * descriptor_key. - */ - 'header_name': (string); - /** - * The key to use in the descriptor entry. - */ - 'descriptor_key': (string); -} - -/** - * The following descriptor entry is appended to the descriptor: - * - * .. code-block:: cpp - * - * ("source_cluster", "") - * - * is derived from the :option:`--service-cluster` option. - */ -export interface _envoy_api_v2_route_RateLimit_Action_SourceCluster { -} - -/** - * The following descriptor entry is appended to the descriptor: - * - * .. code-block:: cpp - * - * ("source_cluster", "") - * - * is derived from the :option:`--service-cluster` option. - */ -export interface _envoy_api_v2_route_RateLimit_Action_SourceCluster__Output { -} - -/** - * Global rate limiting :ref:`architecture overview `. - */ -export interface RateLimit { - /** - * Refers to the stage set in the filter. The rate limit configuration only - * applies to filters with the same stage number. The default stage number is - * 0. - * - * .. note:: - * - * The filter supports a range of 0 - 10 inclusively for stage numbers. - */ - 'stage'?: (_google_protobuf_UInt32Value); - /** - * The key to be set in runtime to disable this rate limit configuration. - */ - 'disable_key'?: (string); - /** - * A list of actions that are to be applied for this rate limit configuration. - * Order matters as the actions are processed sequentially and the descriptor - * is composed by appending descriptor entries in that sequence. 
If an action - * cannot append a descriptor entry, no descriptor is generated for the - * configuration. See :ref:`composing actions - * ` for additional documentation. - */ - 'actions'?: (_envoy_api_v2_route_RateLimit_Action)[]; -} - -/** - * Global rate limiting :ref:`architecture overview `. - */ -export interface RateLimit__Output { - /** - * Refers to the stage set in the filter. The rate limit configuration only - * applies to filters with the same stage number. The default stage number is - * 0. - * - * .. note:: - * - * The filter supports a range of 0 - 10 inclusively for stage numbers. - */ - 'stage'?: (_google_protobuf_UInt32Value__Output); - /** - * The key to be set in runtime to disable this rate limit configuration. - */ - 'disable_key': (string); - /** - * A list of actions that are to be applied for this rate limit configuration. - * Order matters as the actions are processed sequentially and the descriptor - * is composed by appending descriptor entries in that sequence. If an action - * cannot append a descriptor entry, no descriptor is generated for the - * configuration. See :ref:`composing actions - * ` for additional documentation. - */ - 'actions': (_envoy_api_v2_route_RateLimit_Action__Output)[]; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/RedirectAction.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/route/RedirectAction.ts deleted file mode 100644 index de7105a54..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/RedirectAction.ts +++ /dev/null @@ -1,139 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/route/route_components.proto - - -// Original file: deps/envoy-api/envoy/api/v2/route/route_components.proto - -export enum _envoy_api_v2_route_RedirectAction_RedirectResponseCode { - /** - * Moved Permanently HTTP Status Code - 301. - */ - MOVED_PERMANENTLY = 0, - /** - * Found HTTP Status Code - 302. - */ - FOUND = 1, - /** - * See Other HTTP Status Code - 303. - */ - SEE_OTHER = 2, - /** - * Temporary Redirect HTTP Status Code - 307. - */ - TEMPORARY_REDIRECT = 3, - /** - * Permanent Redirect HTTP Status Code - 308. - */ - PERMANENT_REDIRECT = 4, -} - -/** - * [#next-free-field: 9] - */ -export interface RedirectAction { - /** - * The host portion of the URL will be swapped with this value. - */ - 'host_redirect'?: (string); - /** - * The path portion of the URL will be swapped with this value. - */ - 'path_redirect'?: (string); - /** - * The HTTP status code to use in the redirect response. The default response - * code is MOVED_PERMANENTLY (301). - */ - 'response_code'?: (_envoy_api_v2_route_RedirectAction_RedirectResponseCode | keyof typeof _envoy_api_v2_route_RedirectAction_RedirectResponseCode); - /** - * The scheme portion of the URL will be swapped with "https". - */ - 'https_redirect'?: (boolean); - /** - * Indicates that during redirection, the matched prefix (or path) - * should be swapped with this value. This option allows redirect URLs be dynamically created - * based on the request. - * - * .. attention:: - * - * Pay attention to the use of trailing slashes as mentioned in - * :ref:`RouteAction's prefix_rewrite `. - */ - 'prefix_rewrite'?: (string); - /** - * Indicates that during redirection, the query portion of the URL will - * be removed. Default value is false. - */ - 'strip_query'?: (boolean); - /** - * The scheme portion of the URL will be swapped with this value. - */ - 'scheme_redirect'?: (string); - /** - * The port value of the URL will be swapped with this value. 
- */ - 'port_redirect'?: (number); - /** - * When the scheme redirection take place, the following rules apply: - * 1. If the source URI scheme is `http` and the port is explicitly - * set to `:80`, the port will be removed after the redirection - * 2. If the source URI scheme is `https` and the port is explicitly - * set to `:443`, the port will be removed after the redirection - */ - 'scheme_rewrite_specifier'?: "https_redirect"|"scheme_redirect"; - 'path_rewrite_specifier'?: "path_redirect"|"prefix_rewrite"; -} - -/** - * [#next-free-field: 9] - */ -export interface RedirectAction__Output { - /** - * The host portion of the URL will be swapped with this value. - */ - 'host_redirect': (string); - /** - * The path portion of the URL will be swapped with this value. - */ - 'path_redirect'?: (string); - /** - * The HTTP status code to use in the redirect response. The default response - * code is MOVED_PERMANENTLY (301). - */ - 'response_code': (keyof typeof _envoy_api_v2_route_RedirectAction_RedirectResponseCode); - /** - * The scheme portion of the URL will be swapped with "https". - */ - 'https_redirect'?: (boolean); - /** - * Indicates that during redirection, the matched prefix (or path) - * should be swapped with this value. This option allows redirect URLs be dynamically created - * based on the request. - * - * .. attention:: - * - * Pay attention to the use of trailing slashes as mentioned in - * :ref:`RouteAction's prefix_rewrite `. - */ - 'prefix_rewrite'?: (string); - /** - * Indicates that during redirection, the query portion of the URL will - * be removed. Default value is false. - */ - 'strip_query': (boolean); - /** - * The scheme portion of the URL will be swapped with this value. - */ - 'scheme_redirect'?: (string); - /** - * The port value of the URL will be swapped with this value. - */ - 'port_redirect': (number); - /** - * When the scheme redirection take place, the following rules apply: - * 1. If the source URI scheme is `http` and the port is explicitly - * set to `:80`, the port will be removed after the redirection - * 2. 
If the source URI scheme is `https` and the port is explicitly - * set to `:443`, the port will be removed after the redirection - */ - 'scheme_rewrite_specifier': "https_redirect"|"scheme_redirect"; - 'path_rewrite_specifier': "path_redirect"|"prefix_rewrite"; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/RetryPolicy.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/route/RetryPolicy.ts deleted file mode 100644 index 63b7deb5a..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/RetryPolicy.ts +++ /dev/null @@ -1,218 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/route/route_components.proto - -import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; -import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../../google/protobuf/Duration'; -import type { HeaderMatcher as _envoy_api_v2_route_HeaderMatcher, HeaderMatcher__Output as _envoy_api_v2_route_HeaderMatcher__Output } from '../../../../envoy/api/v2/route/HeaderMatcher'; -import type { Struct as _google_protobuf_Struct, Struct__Output as _google_protobuf_Struct__Output } from '../../../../google/protobuf/Struct'; -import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; -import type { Long } from '@grpc/proto-loader'; - -export interface _envoy_api_v2_route_RetryPolicy_RetryBackOff { - /** - * Specifies the base interval between retries. This parameter is required and must be greater - * than zero. Values less than 1 ms are rounded up to 1 ms. - * See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's - * back-off algorithm. - */ - 'base_interval'?: (_google_protobuf_Duration); - /** - * Specifies the maximum interval between retries. This parameter is optional, but must be - * greater than or equal to the `base_interval` if set. The default is 10 times the - * `base_interval`. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion - * of Envoy's back-off algorithm. - */ - 'max_interval'?: (_google_protobuf_Duration); -} - -export interface _envoy_api_v2_route_RetryPolicy_RetryBackOff__Output { - /** - * Specifies the base interval between retries. This parameter is required and must be greater - * than zero. Values less than 1 ms are rounded up to 1 ms. - * See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's - * back-off algorithm. - */ - 'base_interval'?: (_google_protobuf_Duration__Output); - /** - * Specifies the maximum interval between retries. This parameter is optional, but must be - * greater than or equal to the `base_interval` if set. The default is 10 times the - * `base_interval`. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion - * of Envoy's back-off algorithm. 
- */ - 'max_interval'?: (_google_protobuf_Duration__Output); -} - -export interface _envoy_api_v2_route_RetryPolicy_RetryHostPredicate { - 'name'?: (string); - 'config'?: (_google_protobuf_Struct); - 'typed_config'?: (_google_protobuf_Any); - 'config_type'?: "config"|"typed_config"; -} - -export interface _envoy_api_v2_route_RetryPolicy_RetryHostPredicate__Output { - 'name': (string); - 'config'?: (_google_protobuf_Struct__Output); - 'typed_config'?: (_google_protobuf_Any__Output); - 'config_type': "config"|"typed_config"; -} - -export interface _envoy_api_v2_route_RetryPolicy_RetryPriority { - 'name'?: (string); - 'config'?: (_google_protobuf_Struct); - 'typed_config'?: (_google_protobuf_Any); - 'config_type'?: "config"|"typed_config"; -} - -export interface _envoy_api_v2_route_RetryPolicy_RetryPriority__Output { - 'name': (string); - 'config'?: (_google_protobuf_Struct__Output); - 'typed_config'?: (_google_protobuf_Any__Output); - 'config_type': "config"|"typed_config"; -} - -/** - * HTTP retry :ref:`architecture overview `. - * [#next-free-field: 11] - */ -export interface RetryPolicy { - /** - * Specifies the conditions under which retry takes place. These are the same - * conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and - * :ref:`config_http_filters_router_x-envoy-retry-grpc-on`. - */ - 'retry_on'?: (string); - /** - * Specifies the allowed number of retries. This parameter is optional and - * defaults to 1. These are the same conditions documented for - * :ref:`config_http_filters_router_x-envoy-max-retries`. - */ - 'num_retries'?: (_google_protobuf_UInt32Value); - /** - * Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. The - * same conditions documented for - * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply. - * - * .. note:: - * - * If left unspecified, Envoy will use the global - * :ref:`route timeout ` for the request. - * Consequently, when using a :ref:`5xx ` based - * retry policy, a request that times out will not be retried as the total timeout budget - * would have been exhausted. - */ - 'per_try_timeout'?: (_google_protobuf_Duration); - /** - * Specifies an implementation of a RetryPriority which is used to determine the - * distribution of load across priorities used for retries. Refer to - * :ref:`retry plugin configuration ` for more details. - */ - 'retry_priority'?: (_envoy_api_v2_route_RetryPolicy_RetryPriority); - /** - * Specifies a collection of RetryHostPredicates that will be consulted when selecting a host - * for retries. If any of the predicates reject the host, host selection will be reattempted. - * Refer to :ref:`retry plugin configuration ` for more - * details. - */ - 'retry_host_predicate'?: (_envoy_api_v2_route_RetryPolicy_RetryHostPredicate)[]; - /** - * The maximum number of times host selection will be reattempted before giving up, at which - * point the host that was last selected will be routed to. If unspecified, this will default to - * retrying once. - */ - 'host_selection_retry_max_attempts'?: (number | string | Long); - /** - * HTTP status codes that should trigger a retry in addition to those specified by retry_on. - */ - 'retriable_status_codes'?: (number)[]; - /** - * Specifies parameters that control retry back off. This parameter is optional, in which case the - * default base interval is 25 milliseconds or, if set, the current value of the - * `upstream.base_retry_backoff_ms` runtime parameter. 
The default maximum interval is 10 times - * the base interval. The documentation for :ref:`config_http_filters_router_x-envoy-max-retries` - * describes Envoy's back-off algorithm. - */ - 'retry_back_off'?: (_envoy_api_v2_route_RetryPolicy_RetryBackOff); - /** - * HTTP response headers that trigger a retry if present in the response. A retry will be - * triggered if any of the header matches match the upstream response headers. - * The field is only consulted if 'retriable-headers' retry policy is active. - */ - 'retriable_headers'?: (_envoy_api_v2_route_HeaderMatcher)[]; - /** - * HTTP headers which must be present in the request for retries to be attempted. - */ - 'retriable_request_headers'?: (_envoy_api_v2_route_HeaderMatcher)[]; -} - -/** - * HTTP retry :ref:`architecture overview `. - * [#next-free-field: 11] - */ -export interface RetryPolicy__Output { - /** - * Specifies the conditions under which retry takes place. These are the same - * conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and - * :ref:`config_http_filters_router_x-envoy-retry-grpc-on`. - */ - 'retry_on': (string); - /** - * Specifies the allowed number of retries. This parameter is optional and - * defaults to 1. These are the same conditions documented for - * :ref:`config_http_filters_router_x-envoy-max-retries`. - */ - 'num_retries'?: (_google_protobuf_UInt32Value__Output); - /** - * Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. The - * same conditions documented for - * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply. - * - * .. note:: - * - * If left unspecified, Envoy will use the global - * :ref:`route timeout ` for the request. - * Consequently, when using a :ref:`5xx ` based - * retry policy, a request that times out will not be retried as the total timeout budget - * would have been exhausted. - */ - 'per_try_timeout'?: (_google_protobuf_Duration__Output); - /** - * Specifies an implementation of a RetryPriority which is used to determine the - * distribution of load across priorities used for retries. Refer to - * :ref:`retry plugin configuration ` for more details. - */ - 'retry_priority'?: (_envoy_api_v2_route_RetryPolicy_RetryPriority__Output); - /** - * Specifies a collection of RetryHostPredicates that will be consulted when selecting a host - * for retries. If any of the predicates reject the host, host selection will be reattempted. - * Refer to :ref:`retry plugin configuration ` for more - * details. - */ - 'retry_host_predicate': (_envoy_api_v2_route_RetryPolicy_RetryHostPredicate__Output)[]; - /** - * The maximum number of times host selection will be reattempted before giving up, at which - * point the host that was last selected will be routed to. If unspecified, this will default to - * retrying once. - */ - 'host_selection_retry_max_attempts': (string); - /** - * HTTP status codes that should trigger a retry in addition to those specified by retry_on. - */ - 'retriable_status_codes': (number)[]; - /** - * Specifies parameters that control retry back off. This parameter is optional, in which case the - * default base interval is 25 milliseconds or, if set, the current value of the - * `upstream.base_retry_backoff_ms` runtime parameter. The default maximum interval is 10 times - * the base interval. The documentation for :ref:`config_http_filters_router_x-envoy-max-retries` - * describes Envoy's back-off algorithm. 
- */ - 'retry_back_off'?: (_envoy_api_v2_route_RetryPolicy_RetryBackOff__Output); - /** - * HTTP response headers that trigger a retry if present in the response. A retry will be - * triggered if any of the header matches match the upstream response headers. - * The field is only consulted if 'retriable-headers' retry policy is active. - */ - 'retriable_headers': (_envoy_api_v2_route_HeaderMatcher__Output)[]; - /** - * HTTP headers which must be present in the request for retries to be attempted. - */ - 'retriable_request_headers': (_envoy_api_v2_route_HeaderMatcher__Output)[]; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/Route.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/route/Route.ts deleted file mode 100644 index 86cb4ac3d..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/Route.ts +++ /dev/null @@ -1,228 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/route/route_components.proto - -import type { RouteMatch as _envoy_api_v2_route_RouteMatch, RouteMatch__Output as _envoy_api_v2_route_RouteMatch__Output } from '../../../../envoy/api/v2/route/RouteMatch'; -import type { RouteAction as _envoy_api_v2_route_RouteAction, RouteAction__Output as _envoy_api_v2_route_RouteAction__Output } from '../../../../envoy/api/v2/route/RouteAction'; -import type { RedirectAction as _envoy_api_v2_route_RedirectAction, RedirectAction__Output as _envoy_api_v2_route_RedirectAction__Output } from '../../../../envoy/api/v2/route/RedirectAction'; -import type { Metadata as _envoy_api_v2_core_Metadata, Metadata__Output as _envoy_api_v2_core_Metadata__Output } from '../../../../envoy/api/v2/core/Metadata'; -import type { Decorator as _envoy_api_v2_route_Decorator, Decorator__Output as _envoy_api_v2_route_Decorator__Output } from '../../../../envoy/api/v2/route/Decorator'; -import type { DirectResponseAction as _envoy_api_v2_route_DirectResponseAction, DirectResponseAction__Output as _envoy_api_v2_route_DirectResponseAction__Output } from '../../../../envoy/api/v2/route/DirectResponseAction'; -import type { Struct as _google_protobuf_Struct, Struct__Output as _google_protobuf_Struct__Output } from '../../../../google/protobuf/Struct'; -import type { HeaderValueOption as _envoy_api_v2_core_HeaderValueOption, HeaderValueOption__Output as _envoy_api_v2_core_HeaderValueOption__Output } from '../../../../envoy/api/v2/core/HeaderValueOption'; -import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; -import type { Tracing as _envoy_api_v2_route_Tracing, Tracing__Output as _envoy_api_v2_route_Tracing__Output } from '../../../../envoy/api/v2/route/Tracing'; -import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; -import type { FilterAction as _envoy_api_v2_route_FilterAction, FilterAction__Output as _envoy_api_v2_route_FilterAction__Output } from '../../../../envoy/api/v2/route/FilterAction'; - -/** - * A route is both a specification of how to match a request as well as an indication of what to do - * next (e.g., redirect, forward, rewrite, etc.). - * - * .. attention:: - * - * Envoy supports routing on HTTP method via :ref:`header matching - * `. - * [#next-free-field: 18] - */ -export interface Route { - /** - * Route matching parameters. - */ - 'match'?: (_envoy_api_v2_route_RouteMatch); - /** - * Route request to some upstream cluster. 
- */ - 'route'?: (_envoy_api_v2_route_RouteAction); - /** - * Return a redirect. - */ - 'redirect'?: (_envoy_api_v2_route_RedirectAction); - /** - * The Metadata field can be used to provide additional information - * about the route. It can be used for configuration, stats, and logging. - * The metadata should go under the filter namespace that will need it. - * For instance, if the metadata is intended for the Router filter, - * the filter name should be specified as *envoy.filters.http.router*. - */ - 'metadata'?: (_envoy_api_v2_core_Metadata); - /** - * Decorator for the matched route. - */ - 'decorator'?: (_envoy_api_v2_route_Decorator); - /** - * Return an arbitrary HTTP response directly, without proxying. - */ - 'direct_response'?: (_envoy_api_v2_route_DirectResponseAction); - /** - * The per_filter_config field can be used to provide route-specific - * configurations for filters. The key should match the filter name, such as - * *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - * specific; see the :ref:`HTTP filter documentation ` for - * if and how it is utilized. - */ - 'per_filter_config'?: ({[key: string]: _google_protobuf_Struct}); - /** - * Specifies a set of headers that will be added to requests matching this - * route. Headers specified at this level are applied before headers from the - * enclosing :ref:`envoy_api_msg_route.VirtualHost` and - * :ref:`envoy_api_msg_RouteConfiguration`. For more information, including details on - * header value syntax, see the documentation on :ref:`custom request headers - * `. - */ - 'request_headers_to_add'?: (_envoy_api_v2_core_HeaderValueOption)[]; - /** - * Specifies a set of headers that will be added to responses to requests - * matching this route. Headers specified at this level are applied before - * headers from the enclosing :ref:`envoy_api_msg_route.VirtualHost` and - * :ref:`envoy_api_msg_RouteConfiguration`. For more information, including - * details on header value syntax, see the documentation on - * :ref:`custom request headers `. - */ - 'response_headers_to_add'?: (_envoy_api_v2_core_HeaderValueOption)[]; - /** - * Specifies a list of HTTP headers that should be removed from each response - * to requests matching this route. - */ - 'response_headers_to_remove'?: (string)[]; - /** - * Specifies a list of HTTP headers that should be removed from each request - * matching this route. - */ - 'request_headers_to_remove'?: (string)[]; - /** - * The typed_per_filter_config field can be used to provide route-specific - * configurations for filters. The key should match the filter name, such as - * *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - * specific; see the :ref:`HTTP filter documentation ` for - * if and how it is utilized. - */ - 'typed_per_filter_config'?: ({[key: string]: _google_protobuf_Any}); - /** - * Name for the route. - */ - 'name'?: (string); - /** - * Presence of the object defines whether the connection manager's tracing configuration - * is overridden by this route specific instance. - */ - 'tracing'?: (_envoy_api_v2_route_Tracing); - /** - * The maximum bytes which will be buffered for retries and shadowing. - * If set, the bytes actually buffered will be the minimum value of this and the - * listener per_connection_buffer_limit_bytes. 
- */ - 'per_request_buffer_limit_bytes'?: (_google_protobuf_UInt32Value); - /** - * [#not-implemented-hide:] - * If true, a filter will define the action (e.g., it could dynamically generate the - * RouteAction). - */ - 'filter_action'?: (_envoy_api_v2_route_FilterAction); - 'action'?: "route"|"redirect"|"direct_response"|"filter_action"; -} - -/** - * A route is both a specification of how to match a request as well as an indication of what to do - * next (e.g., redirect, forward, rewrite, etc.). - * - * .. attention:: - * - * Envoy supports routing on HTTP method via :ref:`header matching - * `. - * [#next-free-field: 18] - */ -export interface Route__Output { - /** - * Route matching parameters. - */ - 'match'?: (_envoy_api_v2_route_RouteMatch__Output); - /** - * Route request to some upstream cluster. - */ - 'route'?: (_envoy_api_v2_route_RouteAction__Output); - /** - * Return a redirect. - */ - 'redirect'?: (_envoy_api_v2_route_RedirectAction__Output); - /** - * The Metadata field can be used to provide additional information - * about the route. It can be used for configuration, stats, and logging. - * The metadata should go under the filter namespace that will need it. - * For instance, if the metadata is intended for the Router filter, - * the filter name should be specified as *envoy.filters.http.router*. - */ - 'metadata'?: (_envoy_api_v2_core_Metadata__Output); - /** - * Decorator for the matched route. - */ - 'decorator'?: (_envoy_api_v2_route_Decorator__Output); - /** - * Return an arbitrary HTTP response directly, without proxying. - */ - 'direct_response'?: (_envoy_api_v2_route_DirectResponseAction__Output); - /** - * The per_filter_config field can be used to provide route-specific - * configurations for filters. The key should match the filter name, such as - * *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - * specific; see the :ref:`HTTP filter documentation ` for - * if and how it is utilized. - */ - 'per_filter_config'?: ({[key: string]: _google_protobuf_Struct__Output}); - /** - * Specifies a set of headers that will be added to requests matching this - * route. Headers specified at this level are applied before headers from the - * enclosing :ref:`envoy_api_msg_route.VirtualHost` and - * :ref:`envoy_api_msg_RouteConfiguration`. For more information, including details on - * header value syntax, see the documentation on :ref:`custom request headers - * `. - */ - 'request_headers_to_add': (_envoy_api_v2_core_HeaderValueOption__Output)[]; - /** - * Specifies a set of headers that will be added to responses to requests - * matching this route. Headers specified at this level are applied before - * headers from the enclosing :ref:`envoy_api_msg_route.VirtualHost` and - * :ref:`envoy_api_msg_RouteConfiguration`. For more information, including - * details on header value syntax, see the documentation on - * :ref:`custom request headers `. - */ - 'response_headers_to_add': (_envoy_api_v2_core_HeaderValueOption__Output)[]; - /** - * Specifies a list of HTTP headers that should be removed from each response - * to requests matching this route. - */ - 'response_headers_to_remove': (string)[]; - /** - * Specifies a list of HTTP headers that should be removed from each request - * matching this route. - */ - 'request_headers_to_remove': (string)[]; - /** - * The typed_per_filter_config field can be used to provide route-specific - * configurations for filters. 
The key should match the filter name, such as - * *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - * specific; see the :ref:`HTTP filter documentation ` for - * if and how it is utilized. - */ - 'typed_per_filter_config'?: ({[key: string]: _google_protobuf_Any__Output}); - /** - * Name for the route. - */ - 'name': (string); - /** - * Presence of the object defines whether the connection manager's tracing configuration - * is overridden by this route specific instance. - */ - 'tracing'?: (_envoy_api_v2_route_Tracing__Output); - /** - * The maximum bytes which will be buffered for retries and shadowing. - * If set, the bytes actually buffered will be the minimum value of this and the - * listener per_connection_buffer_limit_bytes. - */ - 'per_request_buffer_limit_bytes'?: (_google_protobuf_UInt32Value__Output); - /** - * [#not-implemented-hide:] - * If true, a filter will define the action (e.g., it could dynamically generate the - * RouteAction). - */ - 'filter_action'?: (_envoy_api_v2_route_FilterAction__Output); - 'action': "route"|"redirect"|"direct_response"|"filter_action"; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/RouteAction.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/route/RouteAction.ts deleted file mode 100644 index 5f07bae9b..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/RouteAction.ts +++ /dev/null @@ -1,1004 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/route/route_components.proto - -import type { WeightedCluster as _envoy_api_v2_route_WeightedCluster, WeightedCluster__Output as _envoy_api_v2_route_WeightedCluster__Output } from '../../../../envoy/api/v2/route/WeightedCluster'; -import type { Metadata as _envoy_api_v2_core_Metadata, Metadata__Output as _envoy_api_v2_core_Metadata__Output } from '../../../../envoy/api/v2/core/Metadata'; -import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../../google/protobuf/BoolValue'; -import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../../google/protobuf/Duration'; -import type { RetryPolicy as _envoy_api_v2_route_RetryPolicy, RetryPolicy__Output as _envoy_api_v2_route_RetryPolicy__Output } from '../../../../envoy/api/v2/route/RetryPolicy'; -import type { RoutingPriority as _envoy_api_v2_core_RoutingPriority } from '../../../../envoy/api/v2/core/RoutingPriority'; -import type { RateLimit as _envoy_api_v2_route_RateLimit, RateLimit__Output as _envoy_api_v2_route_RateLimit__Output } from '../../../../envoy/api/v2/route/RateLimit'; -import type { CorsPolicy as _envoy_api_v2_route_CorsPolicy, CorsPolicy__Output as _envoy_api_v2_route_CorsPolicy__Output } from '../../../../envoy/api/v2/route/CorsPolicy'; -import type { HedgePolicy as _envoy_api_v2_route_HedgePolicy, HedgePolicy__Output as _envoy_api_v2_route_HedgePolicy__Output } from '../../../../envoy/api/v2/route/HedgePolicy'; -import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; -import type { RegexMatchAndSubstitute as _envoy_type_matcher_RegexMatchAndSubstitute, RegexMatchAndSubstitute__Output as _envoy_type_matcher_RegexMatchAndSubstitute__Output } from '../../../../envoy/type/matcher/RegexMatchAndSubstitute'; -import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from 
'../../../../google/protobuf/Any'; -import type { RuntimeFractionalPercent as _envoy_api_v2_core_RuntimeFractionalPercent, RuntimeFractionalPercent__Output as _envoy_api_v2_core_RuntimeFractionalPercent__Output } from '../../../../envoy/api/v2/core/RuntimeFractionalPercent'; - -// Original file: deps/envoy-api/envoy/api/v2/route/route_components.proto - -export enum _envoy_api_v2_route_RouteAction_ClusterNotFoundResponseCode { - /** - * HTTP status code - 503 Service Unavailable. - */ - SERVICE_UNAVAILABLE = 0, - /** - * HTTP status code - 404 Not Found. - */ - NOT_FOUND = 1, -} - -export interface _envoy_api_v2_route_RouteAction_HashPolicy_ConnectionProperties { - /** - * Hash on source IP address. - */ - 'source_ip'?: (boolean); -} - -export interface _envoy_api_v2_route_RouteAction_HashPolicy_ConnectionProperties__Output { - /** - * Hash on source IP address. - */ - 'source_ip': (boolean); -} - -/** - * Envoy supports two types of cookie affinity: - * - * 1. Passive. Envoy takes a cookie that's present in the cookies header and - * hashes on its value. - * - * 2. Generated. Envoy generates and sets a cookie with an expiration (TTL) - * on the first request from the client in its response to the client, - * based on the endpoint the request gets sent to. The client then - * presents this on the next and all subsequent requests. The hash of - * this is sufficient to ensure these requests get sent to the same - * endpoint. The cookie is generated by hashing the source and - * destination ports and addresses so that multiple independent HTTP2 - * streams on the same connection will independently receive the same - * cookie, even if they arrive at the Envoy simultaneously. - */ -export interface _envoy_api_v2_route_RouteAction_HashPolicy_Cookie { - /** - * The name of the cookie that will be used to obtain the hash key. If the - * cookie is not present and ttl below is not set, no hash will be - * produced. - */ - 'name'?: (string); - /** - * If specified, a cookie with the TTL will be generated if the cookie is - * not present. If the TTL is present and zero, the generated cookie will - * be a session cookie. - */ - 'ttl'?: (_google_protobuf_Duration); - /** - * The name of the path for the cookie. If no path is specified here, no path - * will be set for the cookie. - */ - 'path'?: (string); -} - -/** - * Envoy supports two types of cookie affinity: - * - * 1. Passive. Envoy takes a cookie that's present in the cookies header and - * hashes on its value. - * - * 2. Generated. Envoy generates and sets a cookie with an expiration (TTL) - * on the first request from the client in its response to the client, - * based on the endpoint the request gets sent to. The client then - * presents this on the next and all subsequent requests. The hash of - * this is sufficient to ensure these requests get sent to the same - * endpoint. The cookie is generated by hashing the source and - * destination ports and addresses so that multiple independent HTTP2 - * streams on the same connection will independently receive the same - * cookie, even if they arrive at the Envoy simultaneously. - */ -export interface _envoy_api_v2_route_RouteAction_HashPolicy_Cookie__Output { - /** - * The name of the cookie that will be used to obtain the hash key. If the - * cookie is not present and ttl below is not set, no hash will be - * produced. - */ - 'name': (string); - /** - * If specified, a cookie with the TTL will be generated if the cookie is - * not present. 
If the TTL is present and zero, the generated cookie will - * be a session cookie. - */ - 'ttl'?: (_google_protobuf_Duration__Output); - /** - * The name of the path for the cookie. If no path is specified here, no path - * will be set for the cookie. - */ - 'path': (string); -} - -export interface _envoy_api_v2_route_RouteAction_HashPolicy_FilterState { - /** - * The name of the Object in the per-request filterState, which is an - * Envoy::Http::Hashable object. If there is no data associated with the key, - * or the stored object is not Envoy::Http::Hashable, no hash will be produced. - */ - 'key'?: (string); -} - -export interface _envoy_api_v2_route_RouteAction_HashPolicy_FilterState__Output { - /** - * The name of the Object in the per-request filterState, which is an - * Envoy::Http::Hashable object. If there is no data associated with the key, - * or the stored object is not Envoy::Http::Hashable, no hash will be produced. - */ - 'key': (string); -} - -/** - * Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer - * `. - * [#next-free-field: 7] - */ -export interface _envoy_api_v2_route_RouteAction_HashPolicy { - /** - * Header hash policy. - */ - 'header'?: (_envoy_api_v2_route_RouteAction_HashPolicy_Header); - /** - * Cookie hash policy. - */ - 'cookie'?: (_envoy_api_v2_route_RouteAction_HashPolicy_Cookie); - /** - * Connection properties hash policy. - */ - 'connection_properties'?: (_envoy_api_v2_route_RouteAction_HashPolicy_ConnectionProperties); - /** - * Query parameter hash policy. - */ - 'query_parameter'?: (_envoy_api_v2_route_RouteAction_HashPolicy_QueryParameter); - /** - * Filter state hash policy. - */ - 'filter_state'?: (_envoy_api_v2_route_RouteAction_HashPolicy_FilterState); - /** - * The flag that short-circuits the hash computing. This field provides a - * 'fallback' style of configuration: "if a terminal policy doesn't work, - * fallback to rest of the policy list", it saves time when the terminal - * policy works. - * - * If true, and there is already a hash computed, ignore rest of the - * list of hash polices. - * For example, if the following hash methods are configured: - * - * ========= ======== - * specifier terminal - * ========= ======== - * Header A true - * Header B false - * Header C false - * ========= ======== - * - * The generateHash process ends if policy "header A" generates a hash, as - * it's a terminal policy. - */ - 'terminal'?: (boolean); - 'policy_specifier'?: "header"|"cookie"|"connection_properties"|"query_parameter"|"filter_state"; -} - -/** - * Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer - * `. - * [#next-free-field: 7] - */ -export interface _envoy_api_v2_route_RouteAction_HashPolicy__Output { - /** - * Header hash policy. - */ - 'header'?: (_envoy_api_v2_route_RouteAction_HashPolicy_Header__Output); - /** - * Cookie hash policy. - */ - 'cookie'?: (_envoy_api_v2_route_RouteAction_HashPolicy_Cookie__Output); - /** - * Connection properties hash policy. - */ - 'connection_properties'?: (_envoy_api_v2_route_RouteAction_HashPolicy_ConnectionProperties__Output); - /** - * Query parameter hash policy. - */ - 'query_parameter'?: (_envoy_api_v2_route_RouteAction_HashPolicy_QueryParameter__Output); - /** - * Filter state hash policy. - */ - 'filter_state'?: (_envoy_api_v2_route_RouteAction_HashPolicy_FilterState__Output); - /** - * The flag that short-circuits the hash computing. 
This field provides a - * 'fallback' style of configuration: "if a terminal policy doesn't work, - * fallback to rest of the policy list", it saves time when the terminal - * policy works. - * - * If true, and there is already a hash computed, ignore rest of the - * list of hash polices. - * For example, if the following hash methods are configured: - * - * ========= ======== - * specifier terminal - * ========= ======== - * Header A true - * Header B false - * Header C false - * ========= ======== - * - * The generateHash process ends if policy "header A" generates a hash, as - * it's a terminal policy. - */ - 'terminal': (boolean); - 'policy_specifier': "header"|"cookie"|"connection_properties"|"query_parameter"|"filter_state"; -} - -export interface _envoy_api_v2_route_RouteAction_HashPolicy_Header { - /** - * The name of the request header that will be used to obtain the hash - * key. If the request header is not present, no hash will be produced. - */ - 'header_name'?: (string); -} - -export interface _envoy_api_v2_route_RouteAction_HashPolicy_Header__Output { - /** - * The name of the request header that will be used to obtain the hash - * key. If the request header is not present, no hash will be produced. - */ - 'header_name': (string); -} - -// Original file: deps/envoy-api/envoy/api/v2/route/route_components.proto - -/** - * Configures :ref:`internal redirect ` behavior. - */ -export enum _envoy_api_v2_route_RouteAction_InternalRedirectAction { - PASS_THROUGH_INTERNAL_REDIRECT = 0, - HANDLE_INTERNAL_REDIRECT = 1, -} - -export interface _envoy_api_v2_route_RouteAction_HashPolicy_QueryParameter { - /** - * The name of the URL query parameter that will be used to obtain the hash - * key. If the parameter is not present, no hash will be produced. Query - * parameter names are case-sensitive. - */ - 'name'?: (string); -} - -export interface _envoy_api_v2_route_RouteAction_HashPolicy_QueryParameter__Output { - /** - * The name of the URL query parameter that will be used to obtain the hash - * key. If the parameter is not present, no hash will be produced. Query - * parameter names are case-sensitive. - */ - 'name': (string); -} - -/** - * The router is capable of shadowing traffic from one cluster to another. The current - * implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to - * respond before returning the response from the primary cluster. All normal statistics are - * collected for the shadow cluster making this feature useful for testing. - * - * During shadowing, the host/authority header is altered such that *-shadow* is appended. This is - * useful for logging. For example, *cluster1* becomes *cluster1-shadow*. - * - * .. note:: - * - * Shadowing will not be triggered if the primary cluster does not exist. - */ -export interface _envoy_api_v2_route_RouteAction_RequestMirrorPolicy { - /** - * Specifies the cluster that requests will be mirrored to. The cluster must - * exist in the cluster manager configuration. - */ - 'cluster'?: (string); - /** - * If not specified, all requests to the target cluster will be mirrored. If - * specified, Envoy will lookup the runtime key to get the % of requests to - * mirror. Valid values are from 0 to 10000, allowing for increments of - * 0.01% of requests to be mirrored. If the runtime key is specified in the - * configuration but not present in runtime, 0 is the default and thus 0% of - * requests will be mirrored. - * - * .. attention:: - * - * **This field is deprecated**. 
Set the - * :ref:`runtime_fraction - * ` - * field instead. Mirroring occurs if both this and - * ` - * are not set. - */ - 'runtime_key'?: (string); - /** - * If not specified, all requests to the target cluster will be mirrored. - * - * If specified, this field takes precedence over the `runtime_key` field and requests must also - * fall under the percentage of matches indicated by this field. - * - * For some fraction N/D, a random number in the range [0,D) is selected. If the - * number is <= the value of the numerator N, or if the key is not present, the default - * value, the request will be mirrored. - */ - 'runtime_fraction'?: (_envoy_api_v2_core_RuntimeFractionalPercent); - /** - * Determines if the trace span should be sampled. Defaults to true. - */ - 'trace_sampled'?: (_google_protobuf_BoolValue); -} - -/** - * The router is capable of shadowing traffic from one cluster to another. The current - * implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to - * respond before returning the response from the primary cluster. All normal statistics are - * collected for the shadow cluster making this feature useful for testing. - * - * During shadowing, the host/authority header is altered such that *-shadow* is appended. This is - * useful for logging. For example, *cluster1* becomes *cluster1-shadow*. - * - * .. note:: - * - * Shadowing will not be triggered if the primary cluster does not exist. - */ -export interface _envoy_api_v2_route_RouteAction_RequestMirrorPolicy__Output { - /** - * Specifies the cluster that requests will be mirrored to. The cluster must - * exist in the cluster manager configuration. - */ - 'cluster': (string); - /** - * If not specified, all requests to the target cluster will be mirrored. If - * specified, Envoy will lookup the runtime key to get the % of requests to - * mirror. Valid values are from 0 to 10000, allowing for increments of - * 0.01% of requests to be mirrored. If the runtime key is specified in the - * configuration but not present in runtime, 0 is the default and thus 0% of - * requests will be mirrored. - * - * .. attention:: - * - * **This field is deprecated**. Set the - * :ref:`runtime_fraction - * ` - * field instead. Mirroring occurs if both this and - * ` - * are not set. - */ - 'runtime_key': (string); - /** - * If not specified, all requests to the target cluster will be mirrored. - * - * If specified, this field takes precedence over the `runtime_key` field and requests must also - * fall under the percentage of matches indicated by this field. - * - * For some fraction N/D, a random number in the range [0,D) is selected. If the - * number is <= the value of the numerator N, or if the key is not present, the default - * value, the request will be mirrored. - */ - 'runtime_fraction'?: (_envoy_api_v2_core_RuntimeFractionalPercent__Output); - /** - * Determines if the trace span should be sampled. Defaults to true. - */ - 'trace_sampled'?: (_google_protobuf_BoolValue__Output); -} - -/** - * Allows enabling and disabling upgrades on a per-route basis. - * This overrides any enabled/disabled upgrade filter chain specified in the - * HttpConnectionManager - * :ref:`upgrade_configs - * ` - * but does not affect any custom filter chain specified there. - */ -export interface _envoy_api_v2_route_RouteAction_UpgradeConfig { - /** - * The case-insensitive name of this upgrade, e.g. "websocket". 
- * For each upgrade type present in upgrade_configs, requests with - * Upgrade: [upgrade_type] will be proxied upstream. - */ - 'upgrade_type'?: (string); - /** - * Determines if upgrades are available on this route. Defaults to true. - */ - 'enabled'?: (_google_protobuf_BoolValue); -} - -/** - * Allows enabling and disabling upgrades on a per-route basis. - * This overrides any enabled/disabled upgrade filter chain specified in the - * HttpConnectionManager - * :ref:`upgrade_configs - * ` - * but does not affect any custom filter chain specified there. - */ -export interface _envoy_api_v2_route_RouteAction_UpgradeConfig__Output { - /** - * The case-insensitive name of this upgrade, e.g. "websocket". - * For each upgrade type present in upgrade_configs, requests with - * Upgrade: [upgrade_type] will be proxied upstream. - */ - 'upgrade_type': (string); - /** - * Determines if upgrades are available on this route. Defaults to true. - */ - 'enabled'?: (_google_protobuf_BoolValue__Output); -} - -/** - * [#next-free-field: 34] - */ -export interface RouteAction { - /** - * Indicates the upstream cluster to which the request should be routed - * to. - */ - 'cluster'?: (string); - /** - * Envoy will determine the cluster to route to by reading the value of the - * HTTP header named by cluster_header from the request headers. If the - * header is not found or the referenced cluster does not exist, Envoy will - * return a 404 response. - * - * .. attention:: - * - * Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 - * *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. - */ - 'cluster_header'?: (string); - /** - * Multiple upstream clusters can be specified for a given route. The - * request is routed to one of the upstream clusters based on weights - * assigned to each cluster. See - * :ref:`traffic splitting ` - * for additional documentation. - */ - 'weighted_clusters'?: (_envoy_api_v2_route_WeightedCluster); - /** - * Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints - * in the upstream cluster with metadata matching what's set in this field will be considered - * for load balancing. If using :ref:`weighted_clusters - * `, metadata will be merged, with values - * provided there taking precedence. The filter name should be specified as *envoy.lb*. - */ - 'metadata_match'?: (_envoy_api_v2_core_Metadata); - /** - * Indicates that during forwarding, the matched prefix (or path) should be - * swapped with this value. This option allows application URLs to be rooted - * at a different path from those exposed at the reverse proxy layer. The router filter will - * place the original path before rewrite into the :ref:`x-envoy-original-path - * ` header. - * - * Only one of *prefix_rewrite* or - * :ref:`regex_rewrite ` - * may be specified. - * - * .. attention:: - * - * Pay careful attention to the use of trailing slashes in the - * :ref:`route's match ` prefix value. - * Stripping a prefix from a path requires multiple Routes to handle all cases. For example, - * rewriting * /prefix* to * /* and * /prefix/etc* to * /etc* cannot be done in a single - * :ref:`Route `, as shown by the below config entries: - * - * .. 
code-block:: yaml - * - * - match: - * prefix: "/prefix/" - * route: - * prefix_rewrite: "/" - * - match: - * prefix: "/prefix" - * route: - * prefix_rewrite: "/" - * - * Having above entries in the config, requests to * /prefix* will be stripped to * /*, while - * requests to * /prefix/etc* will be stripped to * /etc*. - */ - 'prefix_rewrite'?: (string); - /** - * Indicates that during forwarding, the host header will be swapped with - * this value. - */ - 'host_rewrite'?: (string); - /** - * Indicates that during forwarding, the host header will be swapped with - * the hostname of the upstream host chosen by the cluster manager. This - * option is applicable only when the destination cluster for a route is of - * type *strict_dns* or *logical_dns*. Setting this to true with other cluster - * types has no effect. - */ - 'auto_host_rewrite'?: (_google_protobuf_BoolValue); - /** - * Specifies the upstream timeout for the route. If not specified, the default is 15s. This - * spans between the point at which the entire downstream request (i.e. end-of-stream) has been - * processed and when the upstream response has been completely processed. A value of 0 will - * disable the route's timeout. - * - * .. note:: - * - * This timeout includes all retries. See also - * :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, - * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the - * :ref:`retry overview `. - */ - 'timeout'?: (_google_protobuf_Duration); - /** - * Indicates that the route has a retry policy. Note that if this is set, - * it'll take precedence over the virtual host level retry policy entirely - * (e.g.: policies are not merged, most internal one becomes the enforced policy). - */ - 'retry_policy'?: (_envoy_api_v2_route_RetryPolicy); - /** - * Indicates that the route has a request mirroring policy. - * - * .. attention:: - * This field has been deprecated in favor of `request_mirror_policies` which supports one or - * more mirroring policies. - */ - 'request_mirror_policy'?: (_envoy_api_v2_route_RouteAction_RequestMirrorPolicy); - /** - * Optionally specifies the :ref:`routing priority `. - */ - 'priority'?: (_envoy_api_v2_core_RoutingPriority | keyof typeof _envoy_api_v2_core_RoutingPriority); - /** - * Specifies a set of rate limit configurations that could be applied to the - * route. - */ - 'rate_limits'?: (_envoy_api_v2_route_RateLimit)[]; - /** - * Specifies if the rate limit filter should include the virtual host rate - * limits. By default, if the route configured rate limits, the virtual host - * :ref:`rate_limits ` are not applied to the - * request. - */ - 'include_vh_rate_limits'?: (_google_protobuf_BoolValue); - /** - * Specifies a list of hash policies to use for ring hash load balancing. Each - * hash policy is evaluated individually and the combined result is used to - * route the request. The method of combination is deterministic such that - * identical lists of hash policies will produce the same hash. Since a hash - * policy examines specific parts of a request, it can fail to produce a hash - * (i.e. if the hashed header is not present). If (and only if) all configured - * hash policies fail to generate a hash, no hash will be produced for - * the route. In this case, the behavior is the same as if no hash policies - * were specified (i.e. the ring hash load balancer will choose a random - * backend). 
If a hash policy has the "terminal" attribute set to true, and - * there is already a hash generated, the hash is returned immediately, - * ignoring the rest of the hash policy list. - */ - 'hash_policy'?: (_envoy_api_v2_route_RouteAction_HashPolicy)[]; - /** - * Indicates that the route has a CORS policy. - */ - 'cors'?: (_envoy_api_v2_route_CorsPolicy); - /** - * The HTTP status code to use when configured cluster is not found. - * The default response code is 503 Service Unavailable. - */ - 'cluster_not_found_response_code'?: (_envoy_api_v2_route_RouteAction_ClusterNotFoundResponseCode | keyof typeof _envoy_api_v2_route_RouteAction_ClusterNotFoundResponseCode); - /** - * If present, and the request is a gRPC request, use the - * `grpc-timeout header `_, - * or its default value (infinity) instead of - * :ref:`timeout `, but limit the applied timeout - * to the maximum value specified here. If configured as 0, the maximum allowed timeout for - * gRPC requests is infinity. If not configured at all, the `grpc-timeout` header is not used - * and gRPC requests time out like any other requests using - * :ref:`timeout ` or its default. - * This can be used to prevent unexpected upstream request timeouts due to potentially long - * time gaps between gRPC request and response in gRPC streaming mode. - * - * .. note:: - * - * If a timeout is specified using :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, it takes - * precedence over `grpc-timeout header `_, when - * both are present. See also - * :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, - * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the - * :ref:`retry overview `. - */ - 'max_grpc_timeout'?: (_google_protobuf_Duration); - /** - * Specifies the idle timeout for the route. If not specified, there is no per-route idle timeout, - * although the connection manager wide :ref:`stream_idle_timeout - * ` - * will still apply. A value of 0 will completely disable the route's idle timeout, even if a - * connection manager stream idle timeout is configured. - * - * The idle timeout is distinct to :ref:`timeout - * `, which provides an upper bound - * on the upstream response time; :ref:`idle_timeout - * ` instead bounds the amount - * of time the request's stream may be idle. - * - * After header decoding, the idle timeout will apply on downstream and - * upstream request events. Each time an encode/decode event for headers or - * data is processed for the stream, the timer will be reset. If the timeout - * fires, the stream is terminated with a 408 Request Timeout error code if no - * upstream response header has been received, otherwise a stream reset - * occurs. - */ - 'idle_timeout'?: (_google_protobuf_Duration); - 'upgrade_configs'?: (_envoy_api_v2_route_RouteAction_UpgradeConfig)[]; - 'internal_redirect_action'?: (_envoy_api_v2_route_RouteAction_InternalRedirectAction | keyof typeof _envoy_api_v2_route_RouteAction_InternalRedirectAction); - /** - * Indicates that the route has a hedge policy. Note that if this is set, - * it'll take precedence over the virtual host level hedge policy entirely - * (e.g.: policies are not merged, most internal one becomes the enforced policy). - */ - 'hedge_policy'?: (_envoy_api_v2_route_HedgePolicy); - /** - * If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting - * the provided duration from the header. 
This is useful in allowing Envoy to set its global - * timeout to be less than that of the deadline imposed by the calling client, which makes it more - * likely that Envoy will handle the timeout instead of having the call canceled by the client. - * The offset will only be applied if the provided grpc_timeout is greater than the offset. This - * ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning - * infinity). - */ - 'grpc_timeout_offset'?: (_google_protobuf_Duration); - /** - * Indicates that during forwarding, the host header will be swapped with the content of given - * downstream or :ref:`custom ` header. - * If header value is empty, host header is left intact. - * - * .. attention:: - * - * Pay attention to the potential security implications of using this option. Provided header - * must come from trusted source. - */ - 'auto_host_rewrite_header'?: (string); - /** - * Indicates that the route has request mirroring policies. - */ - 'request_mirror_policies'?: (_envoy_api_v2_route_RouteAction_RequestMirrorPolicy)[]; - /** - * An internal redirect is handled, iff the number of previous internal redirects that a - * downstream request has encountered is lower than this value, and - * :ref:`internal_redirect_action ` - * is set to :ref:`HANDLE_INTERNAL_REDIRECT - * ` - * In the case where a downstream request is bounced among multiple routes by internal redirect, - * the first route that hits this threshold, or has - * :ref:`internal_redirect_action ` - * set to - * :ref:`PASS_THROUGH_INTERNAL_REDIRECT - * ` - * will pass the redirect back to downstream. - * - * If not specified, at most one redirect will be followed. - */ - 'max_internal_redirects'?: (_google_protobuf_UInt32Value); - /** - * Indicates that during forwarding, portions of the path that match the - * pattern should be rewritten, even allowing the substitution of capture - * groups from the pattern into the new path as specified by the rewrite - * substitution string. This is useful to allow application paths to be - * rewritten in a way that is aware of segments with variable content like - * identifiers. The router filter will place the original path as it was - * before the rewrite into the :ref:`x-envoy-original-path - * ` header. - * - * Only one of :ref:`prefix_rewrite ` - * or *regex_rewrite* may be specified. - * - * Examples using Google's `RE2 `_ engine: - * - * * The path pattern ``^/service/([^/]+)(/.*)$`` paired with a substitution - * string of ``\2/instance/\1`` would transform ``/service/foo/v1/api`` - * into ``/v1/api/instance/foo``. - * - * * The pattern ``one`` paired with a substitution string of ``two`` would - * transform ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/two/zzz``. - * - * * The pattern ``^(.*?)one(.*)$`` paired with a substitution string of - * ``\1two\2`` would replace only the first occurrence of ``one``, - * transforming path ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/one/zzz``. - * - * * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/`` - * would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to - * ``/aaa/yyy/bbb``. - */ - 'regex_rewrite'?: (_envoy_type_matcher_RegexMatchAndSubstitute); - /** - * [#not-implemented-hide:] - * Specifies the configuration for retry policy extension. Note that if this is set, it'll take - * precedence over the virtual host level retry policy entirely (e.g.: policies are not merged, - * most internal one becomes the enforced policy). 
:ref:`Retry policy ` - * should not be set if this field is used. - */ - 'retry_policy_typed_config'?: (_google_protobuf_Any); - 'cluster_specifier'?: "cluster"|"cluster_header"|"weighted_clusters"; - 'host_rewrite_specifier'?: "host_rewrite"|"auto_host_rewrite"|"auto_host_rewrite_header"; -} - -/** - * [#next-free-field: 34] - */ -export interface RouteAction__Output { - /** - * Indicates the upstream cluster to which the request should be routed - * to. - */ - 'cluster'?: (string); - /** - * Envoy will determine the cluster to route to by reading the value of the - * HTTP header named by cluster_header from the request headers. If the - * header is not found or the referenced cluster does not exist, Envoy will - * return a 404 response. - * - * .. attention:: - * - * Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 - * *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. - */ - 'cluster_header'?: (string); - /** - * Multiple upstream clusters can be specified for a given route. The - * request is routed to one of the upstream clusters based on weights - * assigned to each cluster. See - * :ref:`traffic splitting ` - * for additional documentation. - */ - 'weighted_clusters'?: (_envoy_api_v2_route_WeightedCluster__Output); - /** - * Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints - * in the upstream cluster with metadata matching what's set in this field will be considered - * for load balancing. If using :ref:`weighted_clusters - * `, metadata will be merged, with values - * provided there taking precedence. The filter name should be specified as *envoy.lb*. - */ - 'metadata_match'?: (_envoy_api_v2_core_Metadata__Output); - /** - * Indicates that during forwarding, the matched prefix (or path) should be - * swapped with this value. This option allows application URLs to be rooted - * at a different path from those exposed at the reverse proxy layer. The router filter will - * place the original path before rewrite into the :ref:`x-envoy-original-path - * ` header. - * - * Only one of *prefix_rewrite* or - * :ref:`regex_rewrite ` - * may be specified. - * - * .. attention:: - * - * Pay careful attention to the use of trailing slashes in the - * :ref:`route's match ` prefix value. - * Stripping a prefix from a path requires multiple Routes to handle all cases. For example, - * rewriting * /prefix* to * /* and * /prefix/etc* to * /etc* cannot be done in a single - * :ref:`Route `, as shown by the below config entries: - * - * .. code-block:: yaml - * - * - match: - * prefix: "/prefix/" - * route: - * prefix_rewrite: "/" - * - match: - * prefix: "/prefix" - * route: - * prefix_rewrite: "/" - * - * Having above entries in the config, requests to * /prefix* will be stripped to * /*, while - * requests to * /prefix/etc* will be stripped to * /etc*. - */ - 'prefix_rewrite': (string); - /** - * Indicates that during forwarding, the host header will be swapped with - * this value. - */ - 'host_rewrite'?: (string); - /** - * Indicates that during forwarding, the host header will be swapped with - * the hostname of the upstream host chosen by the cluster manager. This - * option is applicable only when the destination cluster for a route is of - * type *strict_dns* or *logical_dns*. Setting this to true with other cluster - * types has no effect. - */ - 'auto_host_rewrite'?: (_google_protobuf_BoolValue__Output); - /** - * Specifies the upstream timeout for the route. 
If not specified, the default is 15s. This - * spans between the point at which the entire downstream request (i.e. end-of-stream) has been - * processed and when the upstream response has been completely processed. A value of 0 will - * disable the route's timeout. - * - * .. note:: - * - * This timeout includes all retries. See also - * :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, - * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the - * :ref:`retry overview `. - */ - 'timeout'?: (_google_protobuf_Duration__Output); - /** - * Indicates that the route has a retry policy. Note that if this is set, - * it'll take precedence over the virtual host level retry policy entirely - * (e.g.: policies are not merged, most internal one becomes the enforced policy). - */ - 'retry_policy'?: (_envoy_api_v2_route_RetryPolicy__Output); - /** - * Indicates that the route has a request mirroring policy. - * - * .. attention:: - * This field has been deprecated in favor of `request_mirror_policies` which supports one or - * more mirroring policies. - */ - 'request_mirror_policy'?: (_envoy_api_v2_route_RouteAction_RequestMirrorPolicy__Output); - /** - * Optionally specifies the :ref:`routing priority `. - */ - 'priority': (keyof typeof _envoy_api_v2_core_RoutingPriority); - /** - * Specifies a set of rate limit configurations that could be applied to the - * route. - */ - 'rate_limits': (_envoy_api_v2_route_RateLimit__Output)[]; - /** - * Specifies if the rate limit filter should include the virtual host rate - * limits. By default, if the route configured rate limits, the virtual host - * :ref:`rate_limits ` are not applied to the - * request. - */ - 'include_vh_rate_limits'?: (_google_protobuf_BoolValue__Output); - /** - * Specifies a list of hash policies to use for ring hash load balancing. Each - * hash policy is evaluated individually and the combined result is used to - * route the request. The method of combination is deterministic such that - * identical lists of hash policies will produce the same hash. Since a hash - * policy examines specific parts of a request, it can fail to produce a hash - * (i.e. if the hashed header is not present). If (and only if) all configured - * hash policies fail to generate a hash, no hash will be produced for - * the route. In this case, the behavior is the same as if no hash policies - * were specified (i.e. the ring hash load balancer will choose a random - * backend). If a hash policy has the "terminal" attribute set to true, and - * there is already a hash generated, the hash is returned immediately, - * ignoring the rest of the hash policy list. - */ - 'hash_policy': (_envoy_api_v2_route_RouteAction_HashPolicy__Output)[]; - /** - * Indicates that the route has a CORS policy. - */ - 'cors'?: (_envoy_api_v2_route_CorsPolicy__Output); - /** - * The HTTP status code to use when configured cluster is not found. - * The default response code is 503 Service Unavailable. - */ - 'cluster_not_found_response_code': (keyof typeof _envoy_api_v2_route_RouteAction_ClusterNotFoundResponseCode); - /** - * If present, and the request is a gRPC request, use the - * `grpc-timeout header `_, - * or its default value (infinity) instead of - * :ref:`timeout `, but limit the applied timeout - * to the maximum value specified here. If configured as 0, the maximum allowed timeout for - * gRPC requests is infinity. 
If not configured at all, the `grpc-timeout` header is not used - * and gRPC requests time out like any other requests using - * :ref:`timeout ` or its default. - * This can be used to prevent unexpected upstream request timeouts due to potentially long - * time gaps between gRPC request and response in gRPC streaming mode. - * - * .. note:: - * - * If a timeout is specified using :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, it takes - * precedence over `grpc-timeout header `_, when - * both are present. See also - * :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, - * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the - * :ref:`retry overview `. - */ - 'max_grpc_timeout'?: (_google_protobuf_Duration__Output); - /** - * Specifies the idle timeout for the route. If not specified, there is no per-route idle timeout, - * although the connection manager wide :ref:`stream_idle_timeout - * ` - * will still apply. A value of 0 will completely disable the route's idle timeout, even if a - * connection manager stream idle timeout is configured. - * - * The idle timeout is distinct to :ref:`timeout - * `, which provides an upper bound - * on the upstream response time; :ref:`idle_timeout - * ` instead bounds the amount - * of time the request's stream may be idle. - * - * After header decoding, the idle timeout will apply on downstream and - * upstream request events. Each time an encode/decode event for headers or - * data is processed for the stream, the timer will be reset. If the timeout - * fires, the stream is terminated with a 408 Request Timeout error code if no - * upstream response header has been received, otherwise a stream reset - * occurs. - */ - 'idle_timeout'?: (_google_protobuf_Duration__Output); - 'upgrade_configs': (_envoy_api_v2_route_RouteAction_UpgradeConfig__Output)[]; - 'internal_redirect_action': (keyof typeof _envoy_api_v2_route_RouteAction_InternalRedirectAction); - /** - * Indicates that the route has a hedge policy. Note that if this is set, - * it'll take precedence over the virtual host level hedge policy entirely - * (e.g.: policies are not merged, most internal one becomes the enforced policy). - */ - 'hedge_policy'?: (_envoy_api_v2_route_HedgePolicy__Output); - /** - * If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting - * the provided duration from the header. This is useful in allowing Envoy to set its global - * timeout to be less than that of the deadline imposed by the calling client, which makes it more - * likely that Envoy will handle the timeout instead of having the call canceled by the client. - * The offset will only be applied if the provided grpc_timeout is greater than the offset. This - * ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning - * infinity). - */ - 'grpc_timeout_offset'?: (_google_protobuf_Duration__Output); - /** - * Indicates that during forwarding, the host header will be swapped with the content of given - * downstream or :ref:`custom ` header. - * If header value is empty, host header is left intact. - * - * .. attention:: - * - * Pay attention to the potential security implications of using this option. Provided header - * must come from trusted source. - */ - 'auto_host_rewrite_header'?: (string); - /** - * Indicates that the route has request mirroring policies. 
- */ - 'request_mirror_policies': (_envoy_api_v2_route_RouteAction_RequestMirrorPolicy__Output)[]; - /** - * An internal redirect is handled, iff the number of previous internal redirects that a - * downstream request has encountered is lower than this value, and - * :ref:`internal_redirect_action ` - * is set to :ref:`HANDLE_INTERNAL_REDIRECT - * ` - * In the case where a downstream request is bounced among multiple routes by internal redirect, - * the first route that hits this threshold, or has - * :ref:`internal_redirect_action ` - * set to - * :ref:`PASS_THROUGH_INTERNAL_REDIRECT - * ` - * will pass the redirect back to downstream. - * - * If not specified, at most one redirect will be followed. - */ - 'max_internal_redirects'?: (_google_protobuf_UInt32Value__Output); - /** - * Indicates that during forwarding, portions of the path that match the - * pattern should be rewritten, even allowing the substitution of capture - * groups from the pattern into the new path as specified by the rewrite - * substitution string. This is useful to allow application paths to be - * rewritten in a way that is aware of segments with variable content like - * identifiers. The router filter will place the original path as it was - * before the rewrite into the :ref:`x-envoy-original-path - * ` header. - * - * Only one of :ref:`prefix_rewrite ` - * or *regex_rewrite* may be specified. - * - * Examples using Google's `RE2 `_ engine: - * - * * The path pattern ``^/service/([^/]+)(/.*)$`` paired with a substitution - * string of ``\2/instance/\1`` would transform ``/service/foo/v1/api`` - * into ``/v1/api/instance/foo``. - * - * * The pattern ``one`` paired with a substitution string of ``two`` would - * transform ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/two/zzz``. - * - * * The pattern ``^(.*?)one(.*)$`` paired with a substitution string of - * ``\1two\2`` would replace only the first occurrence of ``one``, - * transforming path ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/one/zzz``. - * - * * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/`` - * would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to - * ``/aaa/yyy/bbb``. - */ - 'regex_rewrite'?: (_envoy_type_matcher_RegexMatchAndSubstitute__Output); - /** - * [#not-implemented-hide:] - * Specifies the configuration for retry policy extension. Note that if this is set, it'll take - * precedence over the virtual host level retry policy entirely (e.g.: policies are not merged, - * most internal one becomes the enforced policy). :ref:`Retry policy ` - * should not be set if this field is used. 
- */ - 'retry_policy_typed_config'?: (_google_protobuf_Any__Output); - 'cluster_specifier': "cluster"|"cluster_header"|"weighted_clusters"; - 'host_rewrite_specifier': "host_rewrite"|"auto_host_rewrite"|"auto_host_rewrite_header"; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/RouteMatch.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/route/RouteMatch.ts deleted file mode 100644 index b055c0506..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/RouteMatch.ts +++ /dev/null @@ -1,247 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/route/route_components.proto - -import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../../google/protobuf/BoolValue'; -import type { HeaderMatcher as _envoy_api_v2_route_HeaderMatcher, HeaderMatcher__Output as _envoy_api_v2_route_HeaderMatcher__Output } from '../../../../envoy/api/v2/route/HeaderMatcher'; -import type { QueryParameterMatcher as _envoy_api_v2_route_QueryParameterMatcher, QueryParameterMatcher__Output as _envoy_api_v2_route_QueryParameterMatcher__Output } from '../../../../envoy/api/v2/route/QueryParameterMatcher'; -import type { RuntimeFractionalPercent as _envoy_api_v2_core_RuntimeFractionalPercent, RuntimeFractionalPercent__Output as _envoy_api_v2_core_RuntimeFractionalPercent__Output } from '../../../../envoy/api/v2/core/RuntimeFractionalPercent'; -import type { RegexMatcher as _envoy_type_matcher_RegexMatcher, RegexMatcher__Output as _envoy_type_matcher_RegexMatcher__Output } from '../../../../envoy/type/matcher/RegexMatcher'; - -export interface _envoy_api_v2_route_RouteMatch_GrpcRouteMatchOptions { -} - -export interface _envoy_api_v2_route_RouteMatch_GrpcRouteMatchOptions__Output { -} - -export interface _envoy_api_v2_route_RouteMatch_TlsContextMatchOptions { - /** - * If specified, the route will match against whether or not a certificate is presented. - * If not specified, certificate presentation status (true or false) will not be considered when route matching. - */ - 'presented'?: (_google_protobuf_BoolValue); - /** - * If specified, the route will match against whether or not a certificate is validated. - * If not specified, certificate validation status (true or false) will not be considered when route matching. - */ - 'validated'?: (_google_protobuf_BoolValue); -} - -export interface _envoy_api_v2_route_RouteMatch_TlsContextMatchOptions__Output { - /** - * If specified, the route will match against whether or not a certificate is presented. - * If not specified, certificate presentation status (true or false) will not be considered when route matching. - */ - 'presented'?: (_google_protobuf_BoolValue__Output); - /** - * If specified, the route will match against whether or not a certificate is validated. - * If not specified, certificate validation status (true or false) will not be considered when route matching. - */ - 'validated'?: (_google_protobuf_BoolValue__Output); -} - -/** - * [#next-free-field: 12] - */ -export interface RouteMatch { - /** - * If specified, the route is a prefix rule meaning that the prefix must - * match the beginning of the *:path* header. - */ - 'prefix'?: (string); - /** - * If specified, the route is an exact path rule meaning that the path must - * exactly match the *:path* header once the query string is removed. 
- */ - 'path'?: (string); - /** - * If specified, the route is a regular expression rule meaning that the - * regex must match the *:path* header once the query string is removed. The entire path - * (without the query string) must match the regex. The rule will not match if only a - * subsequence of the *:path* header matches the regex. The regex grammar is defined `here - * `_. - * - * Examples: - * - * * The regex ``/b[io]t`` matches the path * /bit* - * * The regex ``/b[io]t`` matches the path * /bot* - * * The regex ``/b[io]t`` does not match the path * /bite* - * * The regex ``/b[io]t`` does not match the path * /bit/bot* - * - * .. attention:: - * This field has been deprecated in favor of `safe_regex` as it is not safe for use with - * untrusted input in all cases. - */ - 'regex'?: (string); - /** - * Indicates that prefix/path matching should be case insensitive. The default - * is true. - */ - 'case_sensitive'?: (_google_protobuf_BoolValue); - /** - * Specifies a set of headers that the route should match on. The router will - * check the request’s headers against all the specified headers in the route - * config. A match will happen if all the headers in the route are present in - * the request with the same values (or based on presence if the value field - * is not in the config). - */ - 'headers'?: (_envoy_api_v2_route_HeaderMatcher)[]; - /** - * Specifies a set of URL query parameters on which the route should - * match. The router will check the query string from the *path* header - * against all the specified query parameters. If the number of specified - * query parameters is nonzero, they all must match the *path* header's - * query string for a match to occur. - */ - 'query_parameters'?: (_envoy_api_v2_route_QueryParameterMatcher)[]; - /** - * If specified, only gRPC requests will be matched. The router will check - * that the content-type header has a application/grpc or one of the various - * application/grpc+ values. - */ - 'grpc'?: (_envoy_api_v2_route_RouteMatch_GrpcRouteMatchOptions); - /** - * Indicates that the route should additionally match on a runtime key. Every time the route - * is considered for a match, it must also fall under the percentage of matches indicated by - * this field. For some fraction N/D, a random number in the range [0,D) is selected. If the - * number is <= the value of the numerator N, or if the key is not present, the default - * value, the router continues to evaluate the remaining match criteria. A runtime_fraction - * route configuration can be used to roll out route changes in a gradual manner without full - * code/config deploys. Refer to the :ref:`traffic shifting - * ` docs for additional documentation. - * - * .. note:: - * - * Parsing this field is implemented such that the runtime key's data may be represented - * as a FractionalPercent proto represented as JSON/YAML and may also be represented as an - * integer with the assumption that the value is an integral percentage out of 100. For - * instance, a runtime key lookup returning the value "42" would parse as a FractionalPercent - * whose numerator is 42 and denominator is HUNDRED. This preserves legacy semantics. - */ - 'runtime_fraction'?: (_envoy_api_v2_core_RuntimeFractionalPercent); - /** - * If specified, the route is a regular expression rule meaning that the - * regex must match the *:path* header once the query string is removed. The entire path - * (without the query string) must match the regex. 
The rule will not match if only a - * subsequence of the *:path* header matches the regex. - * - * [#next-major-version: In the v3 API we should redo how path specification works such - * that we utilize StringMatcher, and additionally have consistent options around whether we - * strip query strings, do a case sensitive match, etc. In the interim it will be too disruptive - * to deprecate the existing options. We should even consider whether we want to do away with - * path_specifier entirely and just rely on a set of header matchers which can already match - * on :path, etc. The issue with that is it is unclear how to generically deal with query string - * stripping. This needs more thought.] - */ - 'safe_regex'?: (_envoy_type_matcher_RegexMatcher); - /** - * If specified, the client tls context will be matched against the defined - * match options. - * - * [#next-major-version: unify with RBAC] - */ - 'tls_context'?: (_envoy_api_v2_route_RouteMatch_TlsContextMatchOptions); - 'path_specifier'?: "prefix"|"path"|"regex"|"safe_regex"; -} - -/** - * [#next-free-field: 12] - */ -export interface RouteMatch__Output { - /** - * If specified, the route is a prefix rule meaning that the prefix must - * match the beginning of the *:path* header. - */ - 'prefix'?: (string); - /** - * If specified, the route is an exact path rule meaning that the path must - * exactly match the *:path* header once the query string is removed. - */ - 'path'?: (string); - /** - * If specified, the route is a regular expression rule meaning that the - * regex must match the *:path* header once the query string is removed. The entire path - * (without the query string) must match the regex. The rule will not match if only a - * subsequence of the *:path* header matches the regex. The regex grammar is defined `here - * `_. - * - * Examples: - * - * * The regex ``/b[io]t`` matches the path * /bit* - * * The regex ``/b[io]t`` matches the path * /bot* - * * The regex ``/b[io]t`` does not match the path * /bite* - * * The regex ``/b[io]t`` does not match the path * /bit/bot* - * - * .. attention:: - * This field has been deprecated in favor of `safe_regex` as it is not safe for use with - * untrusted input in all cases. - */ - 'regex'?: (string); - /** - * Indicates that prefix/path matching should be case insensitive. The default - * is true. - */ - 'case_sensitive'?: (_google_protobuf_BoolValue__Output); - /** - * Specifies a set of headers that the route should match on. The router will - * check the request’s headers against all the specified headers in the route - * config. A match will happen if all the headers in the route are present in - * the request with the same values (or based on presence if the value field - * is not in the config). - */ - 'headers': (_envoy_api_v2_route_HeaderMatcher__Output)[]; - /** - * Specifies a set of URL query parameters on which the route should - * match. The router will check the query string from the *path* header - * against all the specified query parameters. If the number of specified - * query parameters is nonzero, they all must match the *path* header's - * query string for a match to occur. - */ - 'query_parameters': (_envoy_api_v2_route_QueryParameterMatcher__Output)[]; - /** - * If specified, only gRPC requests will be matched. The router will check - * that the content-type header has a application/grpc or one of the various - * application/grpc+ values. 
- */ - 'grpc'?: (_envoy_api_v2_route_RouteMatch_GrpcRouteMatchOptions__Output); - /** - * Indicates that the route should additionally match on a runtime key. Every time the route - * is considered for a match, it must also fall under the percentage of matches indicated by - * this field. For some fraction N/D, a random number in the range [0,D) is selected. If the - * number is <= the value of the numerator N, or if the key is not present, the default - * value, the router continues to evaluate the remaining match criteria. A runtime_fraction - * route configuration can be used to roll out route changes in a gradual manner without full - * code/config deploys. Refer to the :ref:`traffic shifting - * ` docs for additional documentation. - * - * .. note:: - * - * Parsing this field is implemented such that the runtime key's data may be represented - * as a FractionalPercent proto represented as JSON/YAML and may also be represented as an - * integer with the assumption that the value is an integral percentage out of 100. For - * instance, a runtime key lookup returning the value "42" would parse as a FractionalPercent - * whose numerator is 42 and denominator is HUNDRED. This preserves legacy semantics. - */ - 'runtime_fraction'?: (_envoy_api_v2_core_RuntimeFractionalPercent__Output); - /** - * If specified, the route is a regular expression rule meaning that the - * regex must match the *:path* header once the query string is removed. The entire path - * (without the query string) must match the regex. The rule will not match if only a - * subsequence of the *:path* header matches the regex. - * - * [#next-major-version: In the v3 API we should redo how path specification works such - * that we utilize StringMatcher, and additionally have consistent options around whether we - * strip query strings, do a case sensitive match, etc. In the interim it will be too disruptive - * to deprecate the existing options. We should even consider whether we want to do away with - * path_specifier entirely and just rely on a set of header matchers which can already match - * on :path, etc. The issue with that is it is unclear how to generically deal with query string - * stripping. This needs more thought.] - */ - 'safe_regex'?: (_envoy_type_matcher_RegexMatcher__Output); - /** - * If specified, the client tls context will be matched against the defined - * match options. 
- * - * [#next-major-version: unify with RBAC] - */ - 'tls_context'?: (_envoy_api_v2_route_RouteMatch_TlsContextMatchOptions__Output); - 'path_specifier': "prefix"|"path"|"regex"|"safe_regex"; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/WeightedCluster.ts b/packages/grpc-js-xds/src/generated/envoy/api/v2/route/WeightedCluster.ts deleted file mode 100644 index 5b283404b..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/WeightedCluster.ts +++ /dev/null @@ -1,213 +0,0 @@ -// Original file: deps/envoy-api/envoy/api/v2/route/route_components.proto - -import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; -import type { Metadata as _envoy_api_v2_core_Metadata, Metadata__Output as _envoy_api_v2_core_Metadata__Output } from '../../../../envoy/api/v2/core/Metadata'; -import type { HeaderValueOption as _envoy_api_v2_core_HeaderValueOption, HeaderValueOption__Output as _envoy_api_v2_core_HeaderValueOption__Output } from '../../../../envoy/api/v2/core/HeaderValueOption'; -import type { Struct as _google_protobuf_Struct, Struct__Output as _google_protobuf_Struct__Output } from '../../../../google/protobuf/Struct'; -import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; - -/** - * [#next-free-field: 11] - */ -export interface _envoy_api_v2_route_WeightedCluster_ClusterWeight { - /** - * Name of the upstream cluster. The cluster must exist in the - * :ref:`cluster manager configuration `. - */ - 'name'?: (string); - /** - * An integer between 0 and :ref:`total_weight - * `. When a request matches the route, - * the choice of an upstream cluster is determined by its weight. The sum of weights across all - * entries in the clusters array must add up to the total_weight, which defaults to 100. - */ - 'weight'?: (_google_protobuf_UInt32Value); - /** - * Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in - * the upstream cluster with metadata matching what is set in this field will be considered for - * load balancing. Note that this will be merged with what's provided in - * :ref:`RouteAction.metadata_match `, with - * values here taking precedence. The filter name should be specified as *envoy.lb*. - */ - 'metadata_match'?: (_envoy_api_v2_core_Metadata); - /** - * Specifies a list of headers to be added to requests when this cluster is selected - * through the enclosing :ref:`envoy_api_msg_route.RouteAction`. - * Headers specified at this level are applied before headers from the enclosing - * :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_route.VirtualHost`, and - * :ref:`envoy_api_msg_RouteConfiguration`. For more information, including details on - * header value syntax, see the documentation on :ref:`custom request headers - * `. - */ - 'request_headers_to_add'?: (_envoy_api_v2_core_HeaderValueOption)[]; - /** - * Specifies a list of HTTP headers that should be removed from each request when - * this cluster is selected through the enclosing :ref:`envoy_api_msg_route.RouteAction`. - */ - 'request_headers_to_remove'?: (string)[]; - /** - * Specifies a list of headers to be added to responses when this cluster is selected - * through the enclosing :ref:`envoy_api_msg_route.RouteAction`. 
- * Headers specified at this level are applied before headers from the enclosing - * :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_route.VirtualHost`, and - * :ref:`envoy_api_msg_RouteConfiguration`. For more information, including details on - * header value syntax, see the documentation on :ref:`custom request headers - * `. - */ - 'response_headers_to_add'?: (_envoy_api_v2_core_HeaderValueOption)[]; - /** - * Specifies a list of headers to be removed from responses when this cluster is selected - * through the enclosing :ref:`envoy_api_msg_route.RouteAction`. - */ - 'response_headers_to_remove'?: (string)[]; - /** - * The per_filter_config field can be used to provide weighted cluster-specific - * configurations for filters. The key should match the filter name, such as - * *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - * specific; see the :ref:`HTTP filter documentation ` - * for if and how it is utilized. - */ - 'per_filter_config'?: ({[key: string]: _google_protobuf_Struct}); - /** - * The per_filter_config field can be used to provide weighted cluster-specific - * configurations for filters. The key should match the filter name, such as - * *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - * specific; see the :ref:`HTTP filter documentation ` - * for if and how it is utilized. - */ - 'typed_per_filter_config'?: ({[key: string]: _google_protobuf_Any}); -} - -/** - * [#next-free-field: 11] - */ -export interface _envoy_api_v2_route_WeightedCluster_ClusterWeight__Output { - /** - * Name of the upstream cluster. The cluster must exist in the - * :ref:`cluster manager configuration `. - */ - 'name': (string); - /** - * An integer between 0 and :ref:`total_weight - * `. When a request matches the route, - * the choice of an upstream cluster is determined by its weight. The sum of weights across all - * entries in the clusters array must add up to the total_weight, which defaults to 100. - */ - 'weight'?: (_google_protobuf_UInt32Value__Output); - /** - * Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in - * the upstream cluster with metadata matching what is set in this field will be considered for - * load balancing. Note that this will be merged with what's provided in - * :ref:`RouteAction.metadata_match `, with - * values here taking precedence. The filter name should be specified as *envoy.lb*. - */ - 'metadata_match'?: (_envoy_api_v2_core_Metadata__Output); - /** - * Specifies a list of headers to be added to requests when this cluster is selected - * through the enclosing :ref:`envoy_api_msg_route.RouteAction`. - * Headers specified at this level are applied before headers from the enclosing - * :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_route.VirtualHost`, and - * :ref:`envoy_api_msg_RouteConfiguration`. For more information, including details on - * header value syntax, see the documentation on :ref:`custom request headers - * `. - */ - 'request_headers_to_add': (_envoy_api_v2_core_HeaderValueOption__Output)[]; - /** - * Specifies a list of HTTP headers that should be removed from each request when - * this cluster is selected through the enclosing :ref:`envoy_api_msg_route.RouteAction`. - */ - 'request_headers_to_remove': (string)[]; - /** - * Specifies a list of headers to be added to responses when this cluster is selected - * through the enclosing :ref:`envoy_api_msg_route.RouteAction`. 
- * Headers specified at this level are applied before headers from the enclosing - * :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_route.VirtualHost`, and - * :ref:`envoy_api_msg_RouteConfiguration`. For more information, including details on - * header value syntax, see the documentation on :ref:`custom request headers - * `. - */ - 'response_headers_to_add': (_envoy_api_v2_core_HeaderValueOption__Output)[]; - /** - * Specifies a list of headers to be removed from responses when this cluster is selected - * through the enclosing :ref:`envoy_api_msg_route.RouteAction`. - */ - 'response_headers_to_remove': (string)[]; - /** - * The per_filter_config field can be used to provide weighted cluster-specific - * configurations for filters. The key should match the filter name, such as - * *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - * specific; see the :ref:`HTTP filter documentation ` - * for if and how it is utilized. - */ - 'per_filter_config'?: ({[key: string]: _google_protobuf_Struct__Output}); - /** - * The per_filter_config field can be used to provide weighted cluster-specific - * configurations for filters. The key should match the filter name, such as - * *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - * specific; see the :ref:`HTTP filter documentation ` - * for if and how it is utilized. - */ - 'typed_per_filter_config'?: ({[key: string]: _google_protobuf_Any__Output}); -} - -/** - * Compared to the :ref:`cluster ` field that specifies a - * single upstream cluster as the target of a request, the :ref:`weighted_clusters - * ` option allows for specification of - * multiple upstream clusters along with weights that indicate the percentage of - * traffic to be forwarded to each cluster. The router selects an upstream cluster based on the - * weights. - */ -export interface WeightedCluster { - /** - * Specifies one or more upstream clusters associated with the route. - */ - 'clusters'?: (_envoy_api_v2_route_WeightedCluster_ClusterWeight)[]; - /** - * Specifies the runtime key prefix that should be used to construct the - * runtime keys associated with each cluster. When the *runtime_key_prefix* is - * specified, the router will look for weights associated with each upstream - * cluster under the key *runtime_key_prefix* + "." + *cluster[i].name* where - * *cluster[i]* denotes an entry in the clusters array field. If the runtime - * key for the cluster does not exist, the value specified in the - * configuration file will be used as the default weight. See the :ref:`runtime documentation - * ` for how key names map to the underlying implementation. - */ - 'runtime_key_prefix'?: (string); - /** - * Specifies the total weight across all clusters. The sum of all cluster weights must equal this - * value, which must be greater than 0. Defaults to 100. - */ - 'total_weight'?: (_google_protobuf_UInt32Value); -} - -/** - * Compared to the :ref:`cluster ` field that specifies a - * single upstream cluster as the target of a request, the :ref:`weighted_clusters - * ` option allows for specification of - * multiple upstream clusters along with weights that indicate the percentage of - * traffic to be forwarded to each cluster. The router selects an upstream cluster based on the - * weights. - */ -export interface WeightedCluster__Output { - /** - * Specifies one or more upstream clusters associated with the route. 
- */ - 'clusters': (_envoy_api_v2_route_WeightedCluster_ClusterWeight__Output)[]; - /** - * Specifies the runtime key prefix that should be used to construct the - * runtime keys associated with each cluster. When the *runtime_key_prefix* is - * specified, the router will look for weights associated with each upstream - * cluster under the key *runtime_key_prefix* + "." + *cluster[i].name* where - * *cluster[i]* denotes an entry in the clusters array field. If the runtime - * key for the cluster does not exist, the value specified in the - * configuration file will be used as the default weight. See the :ref:`runtime documentation - * ` for how key names map to the underlying implementation. - */ - 'runtime_key_prefix': (string); - /** - * Specifies the total weight across all clusters. The sum of all cluster weights must equal this - * value, which must be greater than 0. Defaults to 100. - */ - 'total_weight'?: (_google_protobuf_UInt32Value__Output); -} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/AccessLog.ts b/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/AccessLog.ts new file mode 100644 index 000000000..73a031fdd --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/AccessLog.ts @@ -0,0 +1,38 @@ +// Original file: deps/envoy-api/envoy/config/accesslog/v3/accesslog.proto + +import type { AccessLogFilter as _envoy_config_accesslog_v3_AccessLogFilter, AccessLogFilter__Output as _envoy_config_accesslog_v3_AccessLogFilter__Output } from '../../../../envoy/config/accesslog/v3/AccessLogFilter'; +import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; + +export interface AccessLog { + /** + * The name of the access log extension configuration. + */ + 'name'?: (string); + /** + * Filter which is used to determine if the access log needs to be written. + */ + 'filter'?: (_envoy_config_accesslog_v3_AccessLogFilter | null); + 'typed_config'?: (_google_protobuf_Any | null); + /** + * Custom configuration that must be set according to the access logger extension being instantiated. + * [#extension-category: envoy.access_loggers] + */ + 'config_type'?: "typed_config"; +} + +export interface AccessLog__Output { + /** + * The name of the access log extension configuration. + */ + 'name': (string); + /** + * Filter which is used to determine if the access log needs to be written. + */ + 'filter': (_envoy_config_accesslog_v3_AccessLogFilter__Output | null); + 'typed_config'?: (_google_protobuf_Any__Output | null); + /** + * Custom configuration that must be set according to the access logger extension being instantiated. 
+ * [#extension-category: envoy.access_loggers] + */ + 'config_type': "typed_config"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/AccessLogFilter.ts b/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/AccessLogFilter.ts new file mode 100644 index 000000000..09563cb7a --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/AccessLogFilter.ts @@ -0,0 +1,135 @@ +// Original file: deps/envoy-api/envoy/config/accesslog/v3/accesslog.proto + +import type { StatusCodeFilter as _envoy_config_accesslog_v3_StatusCodeFilter, StatusCodeFilter__Output as _envoy_config_accesslog_v3_StatusCodeFilter__Output } from '../../../../envoy/config/accesslog/v3/StatusCodeFilter'; +import type { DurationFilter as _envoy_config_accesslog_v3_DurationFilter, DurationFilter__Output as _envoy_config_accesslog_v3_DurationFilter__Output } from '../../../../envoy/config/accesslog/v3/DurationFilter'; +import type { NotHealthCheckFilter as _envoy_config_accesslog_v3_NotHealthCheckFilter, NotHealthCheckFilter__Output as _envoy_config_accesslog_v3_NotHealthCheckFilter__Output } from '../../../../envoy/config/accesslog/v3/NotHealthCheckFilter'; +import type { TraceableFilter as _envoy_config_accesslog_v3_TraceableFilter, TraceableFilter__Output as _envoy_config_accesslog_v3_TraceableFilter__Output } from '../../../../envoy/config/accesslog/v3/TraceableFilter'; +import type { RuntimeFilter as _envoy_config_accesslog_v3_RuntimeFilter, RuntimeFilter__Output as _envoy_config_accesslog_v3_RuntimeFilter__Output } from '../../../../envoy/config/accesslog/v3/RuntimeFilter'; +import type { AndFilter as _envoy_config_accesslog_v3_AndFilter, AndFilter__Output as _envoy_config_accesslog_v3_AndFilter__Output } from '../../../../envoy/config/accesslog/v3/AndFilter'; +import type { OrFilter as _envoy_config_accesslog_v3_OrFilter, OrFilter__Output as _envoy_config_accesslog_v3_OrFilter__Output } from '../../../../envoy/config/accesslog/v3/OrFilter'; +import type { HeaderFilter as _envoy_config_accesslog_v3_HeaderFilter, HeaderFilter__Output as _envoy_config_accesslog_v3_HeaderFilter__Output } from '../../../../envoy/config/accesslog/v3/HeaderFilter'; +import type { ResponseFlagFilter as _envoy_config_accesslog_v3_ResponseFlagFilter, ResponseFlagFilter__Output as _envoy_config_accesslog_v3_ResponseFlagFilter__Output } from '../../../../envoy/config/accesslog/v3/ResponseFlagFilter'; +import type { GrpcStatusFilter as _envoy_config_accesslog_v3_GrpcStatusFilter, GrpcStatusFilter__Output as _envoy_config_accesslog_v3_GrpcStatusFilter__Output } from '../../../../envoy/config/accesslog/v3/GrpcStatusFilter'; +import type { ExtensionFilter as _envoy_config_accesslog_v3_ExtensionFilter, ExtensionFilter__Output as _envoy_config_accesslog_v3_ExtensionFilter__Output } from '../../../../envoy/config/accesslog/v3/ExtensionFilter'; +import type { MetadataFilter as _envoy_config_accesslog_v3_MetadataFilter, MetadataFilter__Output as _envoy_config_accesslog_v3_MetadataFilter__Output } from '../../../../envoy/config/accesslog/v3/MetadataFilter'; +import type { LogTypeFilter as _envoy_config_accesslog_v3_LogTypeFilter, LogTypeFilter__Output as _envoy_config_accesslog_v3_LogTypeFilter__Output } from '../../../../envoy/config/accesslog/v3/LogTypeFilter'; + +/** + * [#next-free-field: 14] + */ +export interface AccessLogFilter { + /** + * Status code filter. + */ + 'status_code_filter'?: (_envoy_config_accesslog_v3_StatusCodeFilter | null); + /** + * Duration filter. 
+ */ + 'duration_filter'?: (_envoy_config_accesslog_v3_DurationFilter | null); + /** + * Not health check filter. + */ + 'not_health_check_filter'?: (_envoy_config_accesslog_v3_NotHealthCheckFilter | null); + /** + * Traceable filter. + */ + 'traceable_filter'?: (_envoy_config_accesslog_v3_TraceableFilter | null); + /** + * Runtime filter. + */ + 'runtime_filter'?: (_envoy_config_accesslog_v3_RuntimeFilter | null); + /** + * And filter. + */ + 'and_filter'?: (_envoy_config_accesslog_v3_AndFilter | null); + /** + * Or filter. + */ + 'or_filter'?: (_envoy_config_accesslog_v3_OrFilter | null); + /** + * Header filter. + */ + 'header_filter'?: (_envoy_config_accesslog_v3_HeaderFilter | null); + /** + * Response flag filter. + */ + 'response_flag_filter'?: (_envoy_config_accesslog_v3_ResponseFlagFilter | null); + /** + * gRPC status filter. + */ + 'grpc_status_filter'?: (_envoy_config_accesslog_v3_GrpcStatusFilter | null); + /** + * Extension filter. + * [#extension-category: envoy.access_loggers.extension_filters] + */ + 'extension_filter'?: (_envoy_config_accesslog_v3_ExtensionFilter | null); + /** + * Metadata Filter + */ + 'metadata_filter'?: (_envoy_config_accesslog_v3_MetadataFilter | null); + /** + * Log Type Filter + */ + 'log_type_filter'?: (_envoy_config_accesslog_v3_LogTypeFilter | null); + 'filter_specifier'?: "status_code_filter"|"duration_filter"|"not_health_check_filter"|"traceable_filter"|"runtime_filter"|"and_filter"|"or_filter"|"header_filter"|"response_flag_filter"|"grpc_status_filter"|"extension_filter"|"metadata_filter"|"log_type_filter"; +} + +/** + * [#next-free-field: 14] + */ +export interface AccessLogFilter__Output { + /** + * Status code filter. + */ + 'status_code_filter'?: (_envoy_config_accesslog_v3_StatusCodeFilter__Output | null); + /** + * Duration filter. + */ + 'duration_filter'?: (_envoy_config_accesslog_v3_DurationFilter__Output | null); + /** + * Not health check filter. + */ + 'not_health_check_filter'?: (_envoy_config_accesslog_v3_NotHealthCheckFilter__Output | null); + /** + * Traceable filter. + */ + 'traceable_filter'?: (_envoy_config_accesslog_v3_TraceableFilter__Output | null); + /** + * Runtime filter. + */ + 'runtime_filter'?: (_envoy_config_accesslog_v3_RuntimeFilter__Output | null); + /** + * And filter. + */ + 'and_filter'?: (_envoy_config_accesslog_v3_AndFilter__Output | null); + /** + * Or filter. + */ + 'or_filter'?: (_envoy_config_accesslog_v3_OrFilter__Output | null); + /** + * Header filter. + */ + 'header_filter'?: (_envoy_config_accesslog_v3_HeaderFilter__Output | null); + /** + * Response flag filter. + */ + 'response_flag_filter'?: (_envoy_config_accesslog_v3_ResponseFlagFilter__Output | null); + /** + * gRPC status filter. + */ + 'grpc_status_filter'?: (_envoy_config_accesslog_v3_GrpcStatusFilter__Output | null); + /** + * Extension filter. 
+ * [#extension-category: envoy.access_loggers.extension_filters] + */ + 'extension_filter'?: (_envoy_config_accesslog_v3_ExtensionFilter__Output | null); + /** + * Metadata Filter + */ + 'metadata_filter'?: (_envoy_config_accesslog_v3_MetadataFilter__Output | null); + /** + * Log Type Filter + */ + 'log_type_filter'?: (_envoy_config_accesslog_v3_LogTypeFilter__Output | null); + 'filter_specifier': "status_code_filter"|"duration_filter"|"not_health_check_filter"|"traceable_filter"|"runtime_filter"|"and_filter"|"or_filter"|"header_filter"|"response_flag_filter"|"grpc_status_filter"|"extension_filter"|"metadata_filter"|"log_type_filter"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/AndFilter.ts b/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/AndFilter.ts similarity index 51% rename from packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/AndFilter.ts rename to packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/AndFilter.ts index 7cf1cb98c..c3c2ac8b4 100644 --- a/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/AndFilter.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/AndFilter.ts @@ -1,6 +1,6 @@ -// Original file: deps/envoy-api/envoy/config/filter/accesslog/v2/accesslog.proto +// Original file: deps/envoy-api/envoy/config/accesslog/v3/accesslog.proto -import type { AccessLogFilter as _envoy_config_filter_accesslog_v2_AccessLogFilter, AccessLogFilter__Output as _envoy_config_filter_accesslog_v2_AccessLogFilter__Output } from '../../../../../envoy/config/filter/accesslog/v2/AccessLogFilter'; +import type { AccessLogFilter as _envoy_config_accesslog_v3_AccessLogFilter, AccessLogFilter__Output as _envoy_config_accesslog_v3_AccessLogFilter__Output } from '../../../../envoy/config/accesslog/v3/AccessLogFilter'; /** * Performs a logical “and” operation on the result of each filter in filters. @@ -8,7 +8,7 @@ import type { AccessLogFilter as _envoy_config_filter_accesslog_v2_AccessLogFilt * filter returns false immediately. */ export interface AndFilter { - 'filters'?: (_envoy_config_filter_accesslog_v2_AccessLogFilter)[]; + 'filters'?: (_envoy_config_accesslog_v3_AccessLogFilter)[]; } /** @@ -17,5 +17,5 @@ export interface AndFilter { * filter returns false immediately. 
*/ export interface AndFilter__Output { - 'filters': (_envoy_config_filter_accesslog_v2_AccessLogFilter__Output)[]; + 'filters': (_envoy_config_accesslog_v3_AccessLogFilter__Output)[]; } diff --git a/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/ComparisonFilter.ts b/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/ComparisonFilter.ts new file mode 100644 index 000000000..68c8f1b65 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/ComparisonFilter.ts @@ -0,0 +1,67 @@ +// Original file: deps/envoy-api/envoy/config/accesslog/v3/accesslog.proto + +import type { RuntimeUInt32 as _envoy_config_core_v3_RuntimeUInt32, RuntimeUInt32__Output as _envoy_config_core_v3_RuntimeUInt32__Output } from '../../../../envoy/config/core/v3/RuntimeUInt32'; + +// Original file: deps/envoy-api/envoy/config/accesslog/v3/accesslog.proto + +export const _envoy_config_accesslog_v3_ComparisonFilter_Op = { + /** + * = + */ + EQ: 'EQ', + /** + * >= + */ + GE: 'GE', + /** + * <= + */ + LE: 'LE', +} as const; + +export type _envoy_config_accesslog_v3_ComparisonFilter_Op = + /** + * = + */ + | 'EQ' + | 0 + /** + * >= + */ + | 'GE' + | 1 + /** + * <= + */ + | 'LE' + | 2 + +export type _envoy_config_accesslog_v3_ComparisonFilter_Op__Output = typeof _envoy_config_accesslog_v3_ComparisonFilter_Op[keyof typeof _envoy_config_accesslog_v3_ComparisonFilter_Op] + +/** + * Filter on an integer comparison. + */ +export interface ComparisonFilter { + /** + * Comparison operator. + */ + 'op'?: (_envoy_config_accesslog_v3_ComparisonFilter_Op); + /** + * Value to compare against. + */ + 'value'?: (_envoy_config_core_v3_RuntimeUInt32 | null); +} + +/** + * Filter on an integer comparison. + */ +export interface ComparisonFilter__Output { + /** + * Comparison operator. + */ + 'op': (_envoy_config_accesslog_v3_ComparisonFilter_Op__Output); + /** + * Value to compare against. + */ + 'value': (_envoy_config_core_v3_RuntimeUInt32__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/DurationFilter.ts b/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/DurationFilter.ts new file mode 100644 index 000000000..024936704 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/DurationFilter.ts @@ -0,0 +1,29 @@ +// Original file: deps/envoy-api/envoy/config/accesslog/v3/accesslog.proto + +import type { ComparisonFilter as _envoy_config_accesslog_v3_ComparisonFilter, ComparisonFilter__Output as _envoy_config_accesslog_v3_ComparisonFilter__Output } from '../../../../envoy/config/accesslog/v3/ComparisonFilter'; + +/** + * Filters based on the duration of the request or stream, in milliseconds. + * For end of stream access logs, the total duration of the stream will be used. + * For :ref:`periodic access logs`, + * the duration of the stream at the time of log recording will be used. + */ +export interface DurationFilter { + /** + * Comparison. + */ + 'comparison'?: (_envoy_config_accesslog_v3_ComparisonFilter | null); +} + +/** + * Filters based on the duration of the request or stream, in milliseconds. + * For end of stream access logs, the total duration of the stream will be used. + * For :ref:`periodic access logs`, + * the duration of the stream at the time of log recording will be used. + */ +export interface DurationFilter__Output { + /** + * Comparison. 
+ */ + 'comparison': (_envoy_config_accesslog_v3_ComparisonFilter__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/ExtensionFilter.ts b/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/ExtensionFilter.ts similarity index 56% rename from packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/ExtensionFilter.ts rename to packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/ExtensionFilter.ts index 184c76eb6..19edb671b 100644 --- a/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/ExtensionFilter.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/ExtensionFilter.ts @@ -1,7 +1,6 @@ -// Original file: deps/envoy-api/envoy/config/filter/accesslog/v2/accesslog.proto +// Original file: deps/envoy-api/envoy/config/accesslog/v3/accesslog.proto -import type { Struct as _google_protobuf_Struct, Struct__Output as _google_protobuf_Struct__Output } from '../../../../../google/protobuf/Struct'; -import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../../google/protobuf/Any'; +import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; /** * Extension filter is statically registered at runtime. @@ -12,12 +11,11 @@ export interface ExtensionFilter { * match a statically registered filter. */ 'name'?: (string); - 'config'?: (_google_protobuf_Struct); - 'typed_config'?: (_google_protobuf_Any); + 'typed_config'?: (_google_protobuf_Any | null); /** * Custom configuration that depends on the filter being instantiated. */ - 'config_type'?: "config"|"typed_config"; + 'config_type'?: "typed_config"; } /** @@ -29,10 +27,9 @@ export interface ExtensionFilter__Output { * match a statically registered filter. */ 'name': (string); - 'config'?: (_google_protobuf_Struct__Output); - 'typed_config'?: (_google_protobuf_Any__Output); + 'typed_config'?: (_google_protobuf_Any__Output | null); /** * Custom configuration that depends on the filter being instantiated. 
*/ - 'config_type': "config"|"typed_config"; + 'config_type': "typed_config"; } diff --git a/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/GrpcStatusFilter.ts b/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/GrpcStatusFilter.ts new file mode 100644 index 000000000..ec18bde8a --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/GrpcStatusFilter.ts @@ -0,0 +1,96 @@ +// Original file: deps/envoy-api/envoy/config/accesslog/v3/accesslog.proto + + +// Original file: deps/envoy-api/envoy/config/accesslog/v3/accesslog.proto + +export const _envoy_config_accesslog_v3_GrpcStatusFilter_Status = { + OK: 'OK', + CANCELED: 'CANCELED', + UNKNOWN: 'UNKNOWN', + INVALID_ARGUMENT: 'INVALID_ARGUMENT', + DEADLINE_EXCEEDED: 'DEADLINE_EXCEEDED', + NOT_FOUND: 'NOT_FOUND', + ALREADY_EXISTS: 'ALREADY_EXISTS', + PERMISSION_DENIED: 'PERMISSION_DENIED', + RESOURCE_EXHAUSTED: 'RESOURCE_EXHAUSTED', + FAILED_PRECONDITION: 'FAILED_PRECONDITION', + ABORTED: 'ABORTED', + OUT_OF_RANGE: 'OUT_OF_RANGE', + UNIMPLEMENTED: 'UNIMPLEMENTED', + INTERNAL: 'INTERNAL', + UNAVAILABLE: 'UNAVAILABLE', + DATA_LOSS: 'DATA_LOSS', + UNAUTHENTICATED: 'UNAUTHENTICATED', +} as const; + +export type _envoy_config_accesslog_v3_GrpcStatusFilter_Status = + | 'OK' + | 0 + | 'CANCELED' + | 1 + | 'UNKNOWN' + | 2 + | 'INVALID_ARGUMENT' + | 3 + | 'DEADLINE_EXCEEDED' + | 4 + | 'NOT_FOUND' + | 5 + | 'ALREADY_EXISTS' + | 6 + | 'PERMISSION_DENIED' + | 7 + | 'RESOURCE_EXHAUSTED' + | 8 + | 'FAILED_PRECONDITION' + | 9 + | 'ABORTED' + | 10 + | 'OUT_OF_RANGE' + | 11 + | 'UNIMPLEMENTED' + | 12 + | 'INTERNAL' + | 13 + | 'UNAVAILABLE' + | 14 + | 'DATA_LOSS' + | 15 + | 'UNAUTHENTICATED' + | 16 + +export type _envoy_config_accesslog_v3_GrpcStatusFilter_Status__Output = typeof _envoy_config_accesslog_v3_GrpcStatusFilter_Status[keyof typeof _envoy_config_accesslog_v3_GrpcStatusFilter_Status] + +/** + * Filters gRPC requests based on their response status. If a gRPC status is not + * provided, the filter will infer the status from the HTTP status code. + */ +export interface GrpcStatusFilter { + /** + * Logs only responses that have any one of the gRPC statuses in this field. + */ + 'statuses'?: (_envoy_config_accesslog_v3_GrpcStatusFilter_Status)[]; + /** + * If included and set to true, the filter will instead block all responses + * with a gRPC status or inferred gRPC status enumerated in statuses, and + * allow all other responses. + */ + 'exclude'?: (boolean); +} + +/** + * Filters gRPC requests based on their response status. If a gRPC status is not + * provided, the filter will infer the status from the HTTP status code. + */ +export interface GrpcStatusFilter__Output { + /** + * Logs only responses that have any one of the gRPC statuses in this field. + */ + 'statuses': (_envoy_config_accesslog_v3_GrpcStatusFilter_Status__Output)[]; + /** + * If included and set to true, the filter will instead block all responses + * with a gRPC status or inferred gRPC status enumerated in statuses, and + * allow all other responses. 
+ */ + 'exclude': (boolean); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/HeaderFilter.ts b/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/HeaderFilter.ts new file mode 100644 index 000000000..294084d1c --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/HeaderFilter.ts @@ -0,0 +1,25 @@ +// Original file: deps/envoy-api/envoy/config/accesslog/v3/accesslog.proto + +import type { HeaderMatcher as _envoy_config_route_v3_HeaderMatcher, HeaderMatcher__Output as _envoy_config_route_v3_HeaderMatcher__Output } from '../../../../envoy/config/route/v3/HeaderMatcher'; + +/** + * Filters requests based on the presence or value of a request header. + */ +export interface HeaderFilter { + /** + * Only requests with a header which matches the specified HeaderMatcher will + * pass the filter check. + */ + 'header'?: (_envoy_config_route_v3_HeaderMatcher | null); +} + +/** + * Filters requests based on the presence or value of a request header. + */ +export interface HeaderFilter__Output { + /** + * Only requests with a header which matches the specified HeaderMatcher will + * pass the filter check. + */ + 'header': (_envoy_config_route_v3_HeaderMatcher__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/LogTypeFilter.ts b/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/LogTypeFilter.ts new file mode 100644 index 000000000..59ad8e308 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/LogTypeFilter.ts @@ -0,0 +1,33 @@ +// Original file: deps/envoy-api/envoy/config/accesslog/v3/accesslog.proto + +import type { AccessLogType as _envoy_data_accesslog_v3_AccessLogType, AccessLogType__Output as _envoy_data_accesslog_v3_AccessLogType__Output } from '../../../../envoy/data/accesslog/v3/AccessLogType'; + +/** + * Filters based on access log type. + */ +export interface LogTypeFilter { + /** + * Logs only records which their type is one of the types defined in this field. + */ + 'types'?: (_envoy_data_accesslog_v3_AccessLogType)[]; + /** + * If this field is set to true, the filter will instead block all records + * with a access log type in types field, and allow all other records. + */ + 'exclude'?: (boolean); +} + +/** + * Filters based on access log type. + */ +export interface LogTypeFilter__Output { + /** + * Logs only records which their type is one of the types defined in this field. + */ + 'types': (_envoy_data_accesslog_v3_AccessLogType__Output)[]; + /** + * If this field is set to true, the filter will instead block all records + * with a access log type in types field, and allow all other records. 
+ */ + 'exclude': (boolean); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/MetadataFilter.ts b/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/MetadataFilter.ts new file mode 100644 index 000000000..cd821fef6 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/MetadataFilter.ts @@ -0,0 +1,48 @@ +// Original file: deps/envoy-api/envoy/config/accesslog/v3/accesslog.proto + +import type { MetadataMatcher as _envoy_type_matcher_v3_MetadataMatcher, MetadataMatcher__Output as _envoy_type_matcher_v3_MetadataMatcher__Output } from '../../../../envoy/type/matcher/v3/MetadataMatcher'; +import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../../google/protobuf/BoolValue'; + +/** + * Filters based on matching dynamic metadata. + * If the matcher path and key correspond to an existing key in dynamic + * metadata, the request is logged only if the matcher value is equal to the + * metadata value. If the matcher path and key *do not* correspond to an + * existing key in dynamic metadata, the request is logged only if + * match_if_key_not_found is "true" or unset. + */ +export interface MetadataFilter { + /** + * Matcher to check metadata for specified value. For example, to match on the + * access_log_hint metadata, set the filter to "envoy.common" and the path to + * "access_log_hint", and the value to "true". + */ + 'matcher'?: (_envoy_type_matcher_v3_MetadataMatcher | null); + /** + * Default result if the key does not exist in dynamic metadata: if unset or + * true, then log; if false, then don't log. + */ + 'match_if_key_not_found'?: (_google_protobuf_BoolValue | null); +} + +/** + * Filters based on matching dynamic metadata. + * If the matcher path and key correspond to an existing key in dynamic + * metadata, the request is logged only if the matcher value is equal to the + * metadata value. If the matcher path and key *do not* correspond to an + * existing key in dynamic metadata, the request is logged only if + * match_if_key_not_found is "true" or unset. + */ +export interface MetadataFilter__Output { + /** + * Matcher to check metadata for specified value. For example, to match on the + * access_log_hint metadata, set the filter to "envoy.common" and the path to + * "access_log_hint", and the value to "true". + */ + 'matcher': (_envoy_type_matcher_v3_MetadataMatcher__Output | null); + /** + * Default result if the key does not exist in dynamic metadata: if unset or + * true, then log; if false, then don't log. 
+ */ + 'match_if_key_not_found': (_google_protobuf_BoolValue__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/NotHealthCheckFilter.ts b/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/NotHealthCheckFilter.ts similarity index 81% rename from packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/NotHealthCheckFilter.ts rename to packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/NotHealthCheckFilter.ts index 3de68f081..40728fc34 100644 --- a/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/NotHealthCheckFilter.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/NotHealthCheckFilter.ts @@ -1,4 +1,4 @@ -// Original file: deps/envoy-api/envoy/config/filter/accesslog/v2/accesslog.proto +// Original file: deps/envoy-api/envoy/config/accesslog/v3/accesslog.proto /** diff --git a/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/OrFilter.ts b/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/OrFilter.ts similarity index 50% rename from packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/OrFilter.ts rename to packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/OrFilter.ts index 859c1218c..1756d3ad5 100644 --- a/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/OrFilter.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/OrFilter.ts @@ -1,6 +1,6 @@ -// Original file: deps/envoy-api/envoy/config/filter/accesslog/v2/accesslog.proto +// Original file: deps/envoy-api/envoy/config/accesslog/v3/accesslog.proto -import type { AccessLogFilter as _envoy_config_filter_accesslog_v2_AccessLogFilter, AccessLogFilter__Output as _envoy_config_filter_accesslog_v2_AccessLogFilter__Output } from '../../../../../envoy/config/filter/accesslog/v2/AccessLogFilter'; +import type { AccessLogFilter as _envoy_config_accesslog_v3_AccessLogFilter, AccessLogFilter__Output as _envoy_config_accesslog_v3_AccessLogFilter__Output } from '../../../../envoy/config/accesslog/v3/AccessLogFilter'; /** * Performs a logical “or” operation on the result of each individual filter. @@ -8,7 +8,7 @@ import type { AccessLogFilter as _envoy_config_filter_accesslog_v2_AccessLogFilt * filter returns true immediately. */ export interface OrFilter { - 'filters'?: (_envoy_config_filter_accesslog_v2_AccessLogFilter)[]; + 'filters'?: (_envoy_config_accesslog_v3_AccessLogFilter)[]; } /** @@ -17,5 +17,5 @@ export interface OrFilter { * filter returns true immediately. 
*/ export interface OrFilter__Output { - 'filters': (_envoy_config_filter_accesslog_v2_AccessLogFilter__Output)[]; + 'filters': (_envoy_config_accesslog_v3_AccessLogFilter__Output)[]; } diff --git a/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/ResponseFlagFilter.ts b/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/ResponseFlagFilter.ts similarity index 51% rename from packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/ResponseFlagFilter.ts rename to packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/ResponseFlagFilter.ts index 280fbb26a..c45a63054 100644 --- a/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/ResponseFlagFilter.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/ResponseFlagFilter.ts @@ -1,16 +1,17 @@ -// Original file: deps/envoy-api/envoy/config/filter/accesslog/v2/accesslog.proto +// Original file: deps/envoy-api/envoy/config/accesslog/v3/accesslog.proto /** * Filters requests that received responses with an Envoy response flag set. * A list of the response flags can be found - * in the access log formatter :ref:`documentation`. + * in the access log formatter + * :ref:`documentation`. */ export interface ResponseFlagFilter { /** - * Only responses with the any of the flags listed in this field will be logged. - * This field is optional. If it is not specified, then any response flag will pass - * the filter check. + * Only responses with the any of the flags listed in this field will be + * logged. This field is optional. If it is not specified, then any response + * flag will pass the filter check. */ 'flags'?: (string)[]; } @@ -18,13 +19,14 @@ export interface ResponseFlagFilter { /** * Filters requests that received responses with an Envoy response flag set. * A list of the response flags can be found - * in the access log formatter :ref:`documentation`. + * in the access log formatter + * :ref:`documentation`. */ export interface ResponseFlagFilter__Output { /** - * Only responses with the any of the flags listed in this field will be logged. - * This field is optional. If it is not specified, then any response flag will pass - * the filter check. + * Only responses with the any of the flags listed in this field will be + * logged. This field is optional. If it is not specified, then any response + * flag will pass the filter check. */ 'flags': (string)[]; } diff --git a/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/RuntimeFilter.ts b/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/RuntimeFilter.ts new file mode 100644 index 000000000..b1c940088 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/RuntimeFilter.ts @@ -0,0 +1,73 @@ +// Original file: deps/envoy-api/envoy/config/accesslog/v3/accesslog.proto + +import type { FractionalPercent as _envoy_type_v3_FractionalPercent, FractionalPercent__Output as _envoy_type_v3_FractionalPercent__Output } from '../../../../envoy/type/v3/FractionalPercent'; + +/** + * Filters for random sampling of requests. + */ +export interface RuntimeFilter { + /** + * Runtime key to get an optional overridden numerator for use in the + * ``percent_sampled`` field. If found in runtime, this value will replace the + * default numerator. + */ + 'runtime_key'?: (string); + /** + * The default sampling percentage. If not specified, defaults to 0% with + * denominator of 100. 
+ */ + 'percent_sampled'?: (_envoy_type_v3_FractionalPercent | null); + /** + * By default, sampling pivots on the header + * :ref:`x-request-id` being + * present. If :ref:`x-request-id` + * is present, the filter will consistently sample across multiple hosts based + * on the runtime key value and the value extracted from + * :ref:`x-request-id`. If it is + * missing, or ``use_independent_randomness`` is set to true, the filter will + * randomly sample based on the runtime key value alone. + * ``use_independent_randomness`` can be used for logging kill switches within + * complex nested :ref:`AndFilter + * ` and :ref:`OrFilter + * ` blocks that are easier to + * reason about from a probability perspective (i.e., setting to true will + * cause the filter to behave like an independent random variable when + * composed within logical operator filters). + */ + 'use_independent_randomness'?: (boolean); +} + +/** + * Filters for random sampling of requests. + */ +export interface RuntimeFilter__Output { + /** + * Runtime key to get an optional overridden numerator for use in the + * ``percent_sampled`` field. If found in runtime, this value will replace the + * default numerator. + */ + 'runtime_key': (string); + /** + * The default sampling percentage. If not specified, defaults to 0% with + * denominator of 100. + */ + 'percent_sampled': (_envoy_type_v3_FractionalPercent__Output | null); + /** + * By default, sampling pivots on the header + * :ref:`x-request-id` being + * present. If :ref:`x-request-id` + * is present, the filter will consistently sample across multiple hosts based + * on the runtime key value and the value extracted from + * :ref:`x-request-id`. If it is + * missing, or ``use_independent_randomness`` is set to true, the filter will + * randomly sample based on the runtime key value alone. + * ``use_independent_randomness`` can be used for logging kill switches within + * complex nested :ref:`AndFilter + * ` and :ref:`OrFilter + * ` blocks that are easier to + * reason about from a probability perspective (i.e., setting to true will + * cause the filter to behave like an independent random variable when + * composed within logical operator filters). + */ + 'use_independent_randomness': (boolean); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/StatusCodeFilter.ts b/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/StatusCodeFilter.ts new file mode 100644 index 000000000..a071b5cbb --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/StatusCodeFilter.ts @@ -0,0 +1,23 @@ +// Original file: deps/envoy-api/envoy/config/accesslog/v3/accesslog.proto + +import type { ComparisonFilter as _envoy_config_accesslog_v3_ComparisonFilter, ComparisonFilter__Output as _envoy_config_accesslog_v3_ComparisonFilter__Output } from '../../../../envoy/config/accesslog/v3/ComparisonFilter'; + +/** + * Filters on HTTP response/status code. + */ +export interface StatusCodeFilter { + /** + * Comparison. + */ + 'comparison'?: (_envoy_config_accesslog_v3_ComparisonFilter | null); +} + +/** + * Filters on HTTP response/status code. + */ +export interface StatusCodeFilter__Output { + /** + * Comparison. 
+ */ + 'comparison': (_envoy_config_accesslog_v3_ComparisonFilter__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/TraceableFilter.ts b/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/TraceableFilter.ts similarity index 81% rename from packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/TraceableFilter.ts rename to packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/TraceableFilter.ts index 8de24656a..c2b3a646b 100644 --- a/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/TraceableFilter.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/accesslog/v3/TraceableFilter.ts @@ -1,4 +1,4 @@ -// Original file: deps/envoy-api/envoy/config/filter/accesslog/v2/accesslog.proto +// Original file: deps/envoy-api/envoy/config/accesslog/v3/accesslog.proto /** diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/cluster/CircuitBreakers.ts b/packages/grpc-js-xds/src/generated/envoy/config/cluster/v3/CircuitBreakers.ts similarity index 52% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/cluster/CircuitBreakers.ts rename to packages/grpc-js-xds/src/generated/envoy/config/cluster/v3/CircuitBreakers.ts index edf946590..61f473134 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/cluster/CircuitBreakers.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/cluster/v3/CircuitBreakers.ts @@ -1,10 +1,10 @@ -// Original file: deps/envoy-api/envoy/api/v2/cluster/circuit_breaker.proto +// Original file: deps/envoy-api/envoy/config/cluster/v3/circuit_breaker.proto -import type { RoutingPriority as _envoy_api_v2_core_RoutingPriority } from '../../../../envoy/api/v2/core/RoutingPriority'; +import type { RoutingPriority as _envoy_config_core_v3_RoutingPriority, RoutingPriority__Output as _envoy_config_core_v3_RoutingPriority__Output } from '../../../../envoy/config/core/v3/RoutingPriority'; import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; -import type { Percent as _envoy_type_Percent, Percent__Output as _envoy_type_Percent__Output } from '../../../../envoy/type/Percent'; +import type { Percent as _envoy_type_v3_Percent, Percent__Output as _envoy_type_v3_Percent__Output } from '../../../../envoy/type/v3/Percent'; -export interface _envoy_api_v2_cluster_CircuitBreakers_Thresholds_RetryBudget { +export interface _envoy_config_cluster_v3_CircuitBreakers_Thresholds_RetryBudget { /** * Specifies the limit on concurrent retries as a percentage of the sum of active requests and * active pending requests. For example, if there are 100 active requests and the @@ -12,17 +12,17 @@ export interface _envoy_api_v2_cluster_CircuitBreakers_Thresholds_RetryBudget { * * This parameter is optional. Defaults to 20%. */ - 'budget_percent'?: (_envoy_type_Percent); + 'budget_percent'?: (_envoy_type_v3_Percent | null); /** * Specifies the minimum retry concurrency allowed for the retry budget. The limit on the * number of active retries may never go below this number. * * This parameter is optional. Defaults to 3. 
*/ - 'min_retry_concurrency'?: (_google_protobuf_UInt32Value); + 'min_retry_concurrency'?: (_google_protobuf_UInt32Value | null); } -export interface _envoy_api_v2_cluster_CircuitBreakers_Thresholds_RetryBudget__Output { +export interface _envoy_config_cluster_v3_CircuitBreakers_Thresholds_RetryBudget__Output { /** * Specifies the limit on concurrent retries as a percentage of the sum of active requests and * active pending requests. For example, if there are 100 active requests and the @@ -30,47 +30,49 @@ export interface _envoy_api_v2_cluster_CircuitBreakers_Thresholds_RetryBudget__O * * This parameter is optional. Defaults to 20%. */ - 'budget_percent'?: (_envoy_type_Percent__Output); + 'budget_percent': (_envoy_type_v3_Percent__Output | null); /** * Specifies the minimum retry concurrency allowed for the retry budget. The limit on the * number of active retries may never go below this number. * * This parameter is optional. Defaults to 3. */ - 'min_retry_concurrency'?: (_google_protobuf_UInt32Value__Output); + 'min_retry_concurrency': (_google_protobuf_UInt32Value__Output | null); } /** * A Thresholds defines CircuitBreaker settings for a - * :ref:`RoutingPriority`. + * :ref:`RoutingPriority`. * [#next-free-field: 9] */ -export interface _envoy_api_v2_cluster_CircuitBreakers_Thresholds { +export interface _envoy_config_cluster_v3_CircuitBreakers_Thresholds { /** - * The :ref:`RoutingPriority` + * The :ref:`RoutingPriority` * the specified CircuitBreaker settings apply to. */ - 'priority'?: (_envoy_api_v2_core_RoutingPriority | keyof typeof _envoy_api_v2_core_RoutingPriority); + 'priority'?: (_envoy_config_core_v3_RoutingPriority); /** * The maximum number of connections that Envoy will make to the upstream * cluster. If not specified, the default is 1024. */ - 'max_connections'?: (_google_protobuf_UInt32Value); + 'max_connections'?: (_google_protobuf_UInt32Value | null); /** * The maximum number of pending requests that Envoy will allow to the * upstream cluster. If not specified, the default is 1024. + * This limit is applied as a connection limit for non-HTTP traffic. */ - 'max_pending_requests'?: (_google_protobuf_UInt32Value); + 'max_pending_requests'?: (_google_protobuf_UInt32Value | null); /** * The maximum number of parallel requests that Envoy will make to the * upstream cluster. If not specified, the default is 1024. + * This limit does not apply to non-HTTP traffic. */ - 'max_requests'?: (_google_protobuf_UInt32Value); + 'max_requests'?: (_google_protobuf_UInt32Value | null); /** * The maximum number of parallel retries that Envoy will allow to the * upstream cluster. If not specified, the default is 3. */ - 'max_retries'?: (_google_protobuf_UInt32Value); + 'max_retries'?: (_google_protobuf_UInt32Value | null); /** * Specifies a limit on concurrent retries in relation to the number of active requests. This * parameter is optional. @@ -80,7 +82,7 @@ export interface _envoy_api_v2_cluster_CircuitBreakers_Thresholds { * If this field is set, the retry budget will override any configured retry circuit * breaker. */ - 'retry_budget'?: (_envoy_api_v2_cluster_CircuitBreakers_Thresholds_RetryBudget); + 'retry_budget'?: (_envoy_config_cluster_v3_CircuitBreakers_Thresholds_RetryBudget | null); /** * If track_remaining is true, then stats will be published that expose * the number of resources remaining until the circuit breakers open. If @@ -99,40 +101,42 @@ export interface _envoy_api_v2_cluster_CircuitBreakers_Thresholds { * :ref:`Circuit Breaking ` for * more details. 
*/ - 'max_connection_pools'?: (_google_protobuf_UInt32Value); + 'max_connection_pools'?: (_google_protobuf_UInt32Value | null); } /** * A Thresholds defines CircuitBreaker settings for a - * :ref:`RoutingPriority`. + * :ref:`RoutingPriority`. * [#next-free-field: 9] */ -export interface _envoy_api_v2_cluster_CircuitBreakers_Thresholds__Output { +export interface _envoy_config_cluster_v3_CircuitBreakers_Thresholds__Output { /** - * The :ref:`RoutingPriority` + * The :ref:`RoutingPriority` * the specified CircuitBreaker settings apply to. */ - 'priority': (keyof typeof _envoy_api_v2_core_RoutingPriority); + 'priority': (_envoy_config_core_v3_RoutingPriority__Output); /** * The maximum number of connections that Envoy will make to the upstream * cluster. If not specified, the default is 1024. */ - 'max_connections'?: (_google_protobuf_UInt32Value__Output); + 'max_connections': (_google_protobuf_UInt32Value__Output | null); /** * The maximum number of pending requests that Envoy will allow to the * upstream cluster. If not specified, the default is 1024. + * This limit is applied as a connection limit for non-HTTP traffic. */ - 'max_pending_requests'?: (_google_protobuf_UInt32Value__Output); + 'max_pending_requests': (_google_protobuf_UInt32Value__Output | null); /** * The maximum number of parallel requests that Envoy will make to the * upstream cluster. If not specified, the default is 1024. + * This limit does not apply to non-HTTP traffic. */ - 'max_requests'?: (_google_protobuf_UInt32Value__Output); + 'max_requests': (_google_protobuf_UInt32Value__Output | null); /** * The maximum number of parallel retries that Envoy will allow to the * upstream cluster. If not specified, the default is 3. */ - 'max_retries'?: (_google_protobuf_UInt32Value__Output); + 'max_retries': (_google_protobuf_UInt32Value__Output | null); /** * Specifies a limit on concurrent retries in relation to the number of active requests. This * parameter is optional. @@ -142,7 +146,7 @@ export interface _envoy_api_v2_cluster_CircuitBreakers_Thresholds__Output { * If this field is set, the retry budget will override any configured retry circuit * breaker. */ - 'retry_budget'?: (_envoy_api_v2_cluster_CircuitBreakers_Thresholds_RetryBudget__Output); + 'retry_budget': (_envoy_config_cluster_v3_CircuitBreakers_Thresholds_RetryBudget__Output | null); /** * If track_remaining is true, then stats will be published that expose * the number of resources remaining until the circuit breakers open. If @@ -161,7 +165,7 @@ export interface _envoy_api_v2_cluster_CircuitBreakers_Thresholds__Output { * :ref:`Circuit Breaking ` for * more details. */ - 'max_connection_pools'?: (_google_protobuf_UInt32Value__Output); + 'max_connection_pools': (_google_protobuf_UInt32Value__Output | null); } /** @@ -170,13 +174,27 @@ export interface _envoy_api_v2_cluster_CircuitBreakers_Thresholds__Output { */ export interface CircuitBreakers { /** - * If multiple :ref:`Thresholds` - * are defined with the same :ref:`RoutingPriority`, + * If multiple :ref:`Thresholds` + * are defined with the same :ref:`RoutingPriority`, * the first one in the list is used. If no Thresholds is defined for a given - * :ref:`RoutingPriority`, the default values + * :ref:`RoutingPriority`, the default values * are used. */ - 'thresholds'?: (_envoy_api_v2_cluster_CircuitBreakers_Thresholds)[]; + 'thresholds'?: (_envoy_config_cluster_v3_CircuitBreakers_Thresholds)[]; + /** + * Optional per-host limits which apply to each individual host in a cluster. + * + * .. 
note:: + * currently only the :ref:`max_connections + * ` field is supported for per-host limits. + * + * If multiple per-host :ref:`Thresholds` + * are defined with the same :ref:`RoutingPriority`, + * the first one in the list is used. If no per-host Thresholds are defined for a given + * :ref:`RoutingPriority`, + * the cluster will not have per-host limits. + */ + 'per_host_thresholds'?: (_envoy_config_cluster_v3_CircuitBreakers_Thresholds)[]; } /** @@ -185,11 +203,25 @@ export interface CircuitBreakers { */ export interface CircuitBreakers__Output { /** - * If multiple :ref:`Thresholds` - * are defined with the same :ref:`RoutingPriority`, + * If multiple :ref:`Thresholds` + * are defined with the same :ref:`RoutingPriority`, * the first one in the list is used. If no Thresholds is defined for a given - * :ref:`RoutingPriority`, the default values + * :ref:`RoutingPriority`, the default values * are used. */ - 'thresholds': (_envoy_api_v2_cluster_CircuitBreakers_Thresholds__Output)[]; + 'thresholds': (_envoy_config_cluster_v3_CircuitBreakers_Thresholds__Output)[]; + /** + * Optional per-host limits which apply to each individual host in a cluster. + * + * .. note:: + * currently only the :ref:`max_connections + * ` field is supported for per-host limits. + * + * If multiple per-host :ref:`Thresholds` + * are defined with the same :ref:`RoutingPriority`, + * the first one in the list is used. If no per-host Thresholds are defined for a given + * :ref:`RoutingPriority`, + * the cluster will not have per-host limits. + */ + 'per_host_thresholds': (_envoy_config_cluster_v3_CircuitBreakers_Thresholds__Output)[]; } diff --git a/packages/grpc-js-xds/src/generated/envoy/config/cluster/v3/Cluster.ts b/packages/grpc-js-xds/src/generated/envoy/config/cluster/v3/Cluster.ts new file mode 100644 index 000000000..8cab36301 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/cluster/v3/Cluster.ts @@ -0,0 +1,2666 @@ +// Original file: deps/envoy-api/envoy/config/cluster/v3/cluster.proto + +import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../../google/protobuf/Duration'; +import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; +import type { HealthCheck as _envoy_config_core_v3_HealthCheck, HealthCheck__Output as _envoy_config_core_v3_HealthCheck__Output } from '../../../../envoy/config/core/v3/HealthCheck'; +import type { CircuitBreakers as _envoy_config_cluster_v3_CircuitBreakers, CircuitBreakers__Output as _envoy_config_cluster_v3_CircuitBreakers__Output } from '../../../../envoy/config/cluster/v3/CircuitBreakers'; +import type { Http1ProtocolOptions as _envoy_config_core_v3_Http1ProtocolOptions, Http1ProtocolOptions__Output as _envoy_config_core_v3_Http1ProtocolOptions__Output } from '../../../../envoy/config/core/v3/Http1ProtocolOptions'; +import type { Http2ProtocolOptions as _envoy_config_core_v3_Http2ProtocolOptions, Http2ProtocolOptions__Output as _envoy_config_core_v3_Http2ProtocolOptions__Output } from '../../../../envoy/config/core/v3/Http2ProtocolOptions'; +import type { Address as _envoy_config_core_v3_Address, Address__Output as _envoy_config_core_v3_Address__Output } from '../../../../envoy/config/core/v3/Address'; +import type { OutlierDetection as _envoy_config_cluster_v3_OutlierDetection, OutlierDetection__Output as _envoy_config_cluster_v3_OutlierDetection__Output } from 
'../../../../envoy/config/cluster/v3/OutlierDetection'; +import type { BindConfig as _envoy_config_core_v3_BindConfig, BindConfig__Output as _envoy_config_core_v3_BindConfig__Output } from '../../../../envoy/config/core/v3/BindConfig'; +import type { TransportSocket as _envoy_config_core_v3_TransportSocket, TransportSocket__Output as _envoy_config_core_v3_TransportSocket__Output } from '../../../../envoy/config/core/v3/TransportSocket'; +import type { Metadata as _envoy_config_core_v3_Metadata, Metadata__Output as _envoy_config_core_v3_Metadata__Output } from '../../../../envoy/config/core/v3/Metadata'; +import type { HttpProtocolOptions as _envoy_config_core_v3_HttpProtocolOptions, HttpProtocolOptions__Output as _envoy_config_core_v3_HttpProtocolOptions__Output } from '../../../../envoy/config/core/v3/HttpProtocolOptions'; +import type { UpstreamConnectionOptions as _envoy_config_cluster_v3_UpstreamConnectionOptions, UpstreamConnectionOptions__Output as _envoy_config_cluster_v3_UpstreamConnectionOptions__Output } from '../../../../envoy/config/cluster/v3/UpstreamConnectionOptions'; +import type { ClusterLoadAssignment as _envoy_config_endpoint_v3_ClusterLoadAssignment, ClusterLoadAssignment__Output as _envoy_config_endpoint_v3_ClusterLoadAssignment__Output } from '../../../../envoy/config/endpoint/v3/ClusterLoadAssignment'; +import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; +import type { Filter as _envoy_config_cluster_v3_Filter, Filter__Output as _envoy_config_cluster_v3_Filter__Output } from '../../../../envoy/config/cluster/v3/Filter'; +import type { LoadBalancingPolicy as _envoy_config_cluster_v3_LoadBalancingPolicy, LoadBalancingPolicy__Output as _envoy_config_cluster_v3_LoadBalancingPolicy__Output } from '../../../../envoy/config/cluster/v3/LoadBalancingPolicy'; +import type { ConfigSource as _envoy_config_core_v3_ConfigSource, ConfigSource__Output as _envoy_config_core_v3_ConfigSource__Output } from '../../../../envoy/config/core/v3/ConfigSource'; +import type { UpstreamHttpProtocolOptions as _envoy_config_core_v3_UpstreamHttpProtocolOptions, UpstreamHttpProtocolOptions__Output as _envoy_config_core_v3_UpstreamHttpProtocolOptions__Output } from '../../../../envoy/config/core/v3/UpstreamHttpProtocolOptions'; +import type { TypedExtensionConfig as _envoy_config_core_v3_TypedExtensionConfig, TypedExtensionConfig__Output as _envoy_config_core_v3_TypedExtensionConfig__Output } from '../../../../envoy/config/core/v3/TypedExtensionConfig'; +import type { TrackClusterStats as _envoy_config_cluster_v3_TrackClusterStats, TrackClusterStats__Output as _envoy_config_cluster_v3_TrackClusterStats__Output } from '../../../../envoy/config/cluster/v3/TrackClusterStats'; +import type { DnsResolutionConfig as _envoy_config_core_v3_DnsResolutionConfig, DnsResolutionConfig__Output as _envoy_config_core_v3_DnsResolutionConfig__Output } from '../../../../envoy/config/core/v3/DnsResolutionConfig'; +import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../../google/protobuf/BoolValue'; +import type { Struct as _google_protobuf_Struct, Struct__Output as _google_protobuf_Struct__Output } from '../../../../google/protobuf/Struct'; +import type { RuntimeDouble as _envoy_config_core_v3_RuntimeDouble, RuntimeDouble__Output as _envoy_config_core_v3_RuntimeDouble__Output } from '../../../../envoy/config/core/v3/RuntimeDouble'; +import type { Percent as 
_envoy_type_v3_Percent, Percent__Output as _envoy_type_v3_Percent__Output } from '../../../../envoy/type/v3/Percent'; +import type { UInt64Value as _google_protobuf_UInt64Value, UInt64Value__Output as _google_protobuf_UInt64Value__Output } from '../../../../google/protobuf/UInt64Value'; +import type { HealthStatusSet as _envoy_config_core_v3_HealthStatusSet, HealthStatusSet__Output as _envoy_config_core_v3_HealthStatusSet__Output } from '../../../../envoy/config/core/v3/HealthStatusSet'; +import type { DoubleValue as _google_protobuf_DoubleValue, DoubleValue__Output as _google_protobuf_DoubleValue__Output } from '../../../../google/protobuf/DoubleValue'; + +// Original file: deps/envoy-api/envoy/config/cluster/v3/cluster.proto + +export const _envoy_config_cluster_v3_Cluster_ClusterProtocolSelection = { + /** + * Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2). + * If :ref:`http2_protocol_options ` are + * present, HTTP2 will be used, otherwise HTTP1.1 will be used. + */ + USE_CONFIGURED_PROTOCOL: 'USE_CONFIGURED_PROTOCOL', + /** + * Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection. + */ + USE_DOWNSTREAM_PROTOCOL: 'USE_DOWNSTREAM_PROTOCOL', +} as const; + +export type _envoy_config_cluster_v3_Cluster_ClusterProtocolSelection = + /** + * Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2). + * If :ref:`http2_protocol_options ` are + * present, HTTP2 will be used, otherwise HTTP1.1 will be used. + */ + | 'USE_CONFIGURED_PROTOCOL' + | 0 + /** + * Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection. + */ + | 'USE_DOWNSTREAM_PROTOCOL' + | 1 + +export type _envoy_config_cluster_v3_Cluster_ClusterProtocolSelection__Output = typeof _envoy_config_cluster_v3_Cluster_ClusterProtocolSelection[keyof typeof _envoy_config_cluster_v3_Cluster_ClusterProtocolSelection] + +/** + * Common configuration for all load balancer implementations. + * [#next-free-field: 9] + */ +export interface _envoy_config_cluster_v3_Cluster_CommonLbConfig { + /** + * Configures the :ref:`healthy panic threshold `. + * If not specified, the default is 50%. + * To disable panic mode, set to 0%. + * + * .. note:: + * The specified percent will be truncated to the nearest 1%. + */ + 'healthy_panic_threshold'?: (_envoy_type_v3_Percent | null); + 'zone_aware_lb_config'?: (_envoy_config_cluster_v3_Cluster_CommonLbConfig_ZoneAwareLbConfig | null); + 'locality_weighted_lb_config'?: (_envoy_config_cluster_v3_Cluster_CommonLbConfig_LocalityWeightedLbConfig | null); + /** + * If set, all health check/weight/metadata updates that happen within this duration will be + * merged and delivered in one shot when the duration expires. The start of the duration is when + * the first update happens. This is useful for big clusters, with potentially noisy deploys + * that might trigger excessive CPU usage due to a constant stream of healthcheck state changes + * or metadata updates. The first set of updates to be seen apply immediately (e.g.: a new + * cluster). Please always keep in mind that the use of sandbox technologies may change this + * behavior. + * + * If this is not set, we default to a merge window of 1000ms. To disable it, set the merge + * window to 0. + * + * Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is + * because merging those updates isn't currently safe. See + * https://github.com/envoyproxy/envoy/pull/3941. 
+ */ + 'update_merge_window'?: (_google_protobuf_Duration | null); + /** + * If set to true, Envoy will :ref:`exclude ` new hosts + * when computing load balancing weights until they have been health checked for the first time. + * This will have no effect unless active health checking is also configured. + */ + 'ignore_new_hosts_until_first_hc'?: (boolean); + /** + * If set to ``true``, the cluster manager will drain all existing + * connections to upstream hosts whenever hosts are added or removed from the cluster. + */ + 'close_connections_on_host_set_change'?: (boolean); + /** + * Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) + */ + 'consistent_hashing_lb_config'?: (_envoy_config_cluster_v3_Cluster_CommonLbConfig_ConsistentHashingLbConfig | null); + /** + * This controls what hosts are considered valid when using + * :ref:`host overrides `, which is used by some + * filters to modify the load balancing decision. + * + * If this is unset then [UNKNOWN, HEALTHY, DEGRADED] will be applied by default. If this is + * set with an empty set of statuses then host overrides will be ignored by the load balancing. + */ + 'override_host_status'?: (_envoy_config_core_v3_HealthStatusSet | null); + 'locality_config_specifier'?: "zone_aware_lb_config"|"locality_weighted_lb_config"; +} + +/** + * Common configuration for all load balancer implementations. + * [#next-free-field: 9] + */ +export interface _envoy_config_cluster_v3_Cluster_CommonLbConfig__Output { + /** + * Configures the :ref:`healthy panic threshold `. + * If not specified, the default is 50%. + * To disable panic mode, set to 0%. + * + * .. note:: + * The specified percent will be truncated to the nearest 1%. + */ + 'healthy_panic_threshold': (_envoy_type_v3_Percent__Output | null); + 'zone_aware_lb_config'?: (_envoy_config_cluster_v3_Cluster_CommonLbConfig_ZoneAwareLbConfig__Output | null); + 'locality_weighted_lb_config'?: (_envoy_config_cluster_v3_Cluster_CommonLbConfig_LocalityWeightedLbConfig__Output | null); + /** + * If set, all health check/weight/metadata updates that happen within this duration will be + * merged and delivered in one shot when the duration expires. The start of the duration is when + * the first update happens. This is useful for big clusters, with potentially noisy deploys + * that might trigger excessive CPU usage due to a constant stream of healthcheck state changes + * or metadata updates. The first set of updates to be seen apply immediately (e.g.: a new + * cluster). Please always keep in mind that the use of sandbox technologies may change this + * behavior. + * + * If this is not set, we default to a merge window of 1000ms. To disable it, set the merge + * window to 0. + * + * Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is + * because merging those updates isn't currently safe. See + * https://github.com/envoyproxy/envoy/pull/3941. + */ + 'update_merge_window': (_google_protobuf_Duration__Output | null); + /** + * If set to true, Envoy will :ref:`exclude ` new hosts + * when computing load balancing weights until they have been health checked for the first time. + * This will have no effect unless active health checking is also configured. + */ + 'ignore_new_hosts_until_first_hc': (boolean); + /** + * If set to ``true``, the cluster manager will drain all existing + * connections to upstream hosts whenever hosts are added or removed from the cluster. 
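As a usage sketch for the `CommonLbConfig` message defined above; the `Percent` and `Duration` value shapes (`value`, `seconds`) are assumed from the well-known protos and are not part of this file:

import type { _envoy_config_cluster_v3_Cluster_CommonLbConfig } from './Cluster';

const commonLbConfig: _envoy_config_cluster_v3_Cluster_CommonLbConfig = {
  healthy_panic_threshold: { value: 25 },  // 25%; the default is 50%, and 0% disables panic mode
  update_merge_window: { seconds: 2 },     // merge health/weight/metadata updates for 2s (default 1000ms)
  ignore_new_hosts_until_first_hc: true,   // only meaningful when active health checking is configured
};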
+ */ + 'close_connections_on_host_set_change': (boolean); + /** + * Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) + */ + 'consistent_hashing_lb_config': (_envoy_config_cluster_v3_Cluster_CommonLbConfig_ConsistentHashingLbConfig__Output | null); + /** + * This controls what hosts are considered valid when using + * :ref:`host overrides `, which is used by some + * filters to modify the load balancing decision. + * + * If this is unset then [UNKNOWN, HEALTHY, DEGRADED] will be applied by default. If this is + * set with an empty set of statuses then host overrides will be ignored by the load balancing. + */ + 'override_host_status': (_envoy_config_core_v3_HealthStatusSet__Output | null); + 'locality_config_specifier': "zone_aware_lb_config"|"locality_weighted_lb_config"; +} + +/** + * Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) + */ +export interface _envoy_config_cluster_v3_Cluster_CommonLbConfig_ConsistentHashingLbConfig { + /** + * If set to ``true``, the cluster will use hostname instead of the resolved + * address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address. + */ + 'use_hostname_for_hashing'?: (boolean); + /** + * Configures percentage of average cluster load to bound per upstream host. For example, with a value of 150 + * no upstream host will get a load more than 1.5 times the average load of all the hosts in the cluster. + * If not specified, the load is not bounded for any upstream host. Typical value for this parameter is between 120 and 200. + * Minimum is 100. + * + * Applies to both Ring Hash and Maglev load balancers. + * + * This is implemented based on the method described in the paper https://arxiv.org/abs/1608.01350. For the specified + * ``hash_balance_factor``, requests to any upstream host are capped at ``hash_balance_factor/100`` times the average number of requests + * across the cluster. When a request arrives for an upstream host that is currently serving at its max capacity, linear probing + * is used to identify an eligible host. Further, the linear probe is implemented using a random jump in hosts ring/table to identify + * the eligible host (this technique is as described in the paper https://arxiv.org/abs/1908.08762 - the random jump avoids the + * cascading overflow effect when choosing the next host in the ring/table). + * + * If weights are specified on the hosts, they are respected. + * + * This is an O(N) algorithm, unlike other load balancers. Using a lower ``hash_balance_factor`` results in more hosts + * being probed, so use a higher value if you require better performance. + */ + 'hash_balance_factor'?: (_google_protobuf_UInt32Value | null); +} + +/** + * Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) + */ +export interface _envoy_config_cluster_v3_Cluster_CommonLbConfig_ConsistentHashingLbConfig__Output { + /** + * If set to ``true``, the cluster will use hostname instead of the resolved + * address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address. + */ + 'use_hostname_for_hashing': (boolean); + /** + * Configures percentage of average cluster load to bound per upstream host. For example, with a value of 150 + * no upstream host will get a load more than 1.5 times the average load of all the hosts in the cluster. 
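To make the `hash_balance_factor` bound concrete: with a factor of 150 and a cluster average of 40 active requests per host, any single host is capped at roughly 150/100 × 40 = 60 requests before linear probing selects another host. A sketch of the corresponding config (wrapper shape assumed, as above):

import type { _envoy_config_cluster_v3_Cluster_CommonLbConfig_ConsistentHashingLbConfig } from './Cluster';

const consistentHashing: _envoy_config_cluster_v3_Cluster_CommonLbConfig_ConsistentHashingLbConfig = {
  use_hostname_for_hashing: false,      // hash on resolved addresses
  hash_balance_factor: { value: 150 },  // cap each host at 1.5x the cluster-average request count
};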
+ * If not specified, the load is not bounded for any upstream host. Typical value for this parameter is between 120 and 200. + * Minimum is 100. + * + * Applies to both Ring Hash and Maglev load balancers. + * + * This is implemented based on the method described in the paper https://arxiv.org/abs/1608.01350. For the specified + * ``hash_balance_factor``, requests to any upstream host are capped at ``hash_balance_factor/100`` times the average number of requests + * across the cluster. When a request arrives for an upstream host that is currently serving at its max capacity, linear probing + * is used to identify an eligible host. Further, the linear probe is implemented using a random jump in hosts ring/table to identify + * the eligible host (this technique is as described in the paper https://arxiv.org/abs/1908.08762 - the random jump avoids the + * cascading overflow effect when choosing the next host in the ring/table). + * + * If weights are specified on the hosts, they are respected. + * + * This is an O(N) algorithm, unlike other load balancers. Using a lower ``hash_balance_factor`` results in more hosts + * being probed, so use a higher value if you require better performance. + */ + 'hash_balance_factor': (_google_protobuf_UInt32Value__Output | null); +} + +/** + * Extended cluster type. + */ +export interface _envoy_config_cluster_v3_Cluster_CustomClusterType { + /** + * The type of the cluster to instantiate. The name must match a supported cluster type. + */ + 'name'?: (string); + /** + * Cluster specific configuration which depends on the cluster being instantiated. + * See the supported cluster for further documentation. + * [#extension-category: envoy.clusters] + */ + 'typed_config'?: (_google_protobuf_Any | null); +} + +/** + * Extended cluster type. + */ +export interface _envoy_config_cluster_v3_Cluster_CustomClusterType__Output { + /** + * The type of the cluster to instantiate. The name must match a supported cluster type. + */ + 'name': (string); + /** + * Cluster specific configuration which depends on the cluster being instantiated. + * See the supported cluster for further documentation. + * [#extension-category: envoy.clusters] + */ + 'typed_config': (_google_protobuf_Any__Output | null); +} + +// Original file: deps/envoy-api/envoy/config/cluster/v3/cluster.proto + +/** + * Refer to :ref:`service discovery type ` + * for an explanation on each type. + */ +export const _envoy_config_cluster_v3_Cluster_DiscoveryType = { + /** + * Refer to the :ref:`static discovery type` + * for an explanation. + */ + STATIC: 'STATIC', + /** + * Refer to the :ref:`strict DNS discovery + * type` + * for an explanation. + */ + STRICT_DNS: 'STRICT_DNS', + /** + * Refer to the :ref:`logical DNS discovery + * type` + * for an explanation. + */ + LOGICAL_DNS: 'LOGICAL_DNS', + /** + * Refer to the :ref:`service discovery type` + * for an explanation. + */ + EDS: 'EDS', + /** + * Refer to the :ref:`original destination discovery + * type` + * for an explanation. + */ + ORIGINAL_DST: 'ORIGINAL_DST', +} as const; + +/** + * Refer to :ref:`service discovery type ` + * for an explanation on each type. + */ +export type _envoy_config_cluster_v3_Cluster_DiscoveryType = + /** + * Refer to the :ref:`static discovery type` + * for an explanation. + */ + | 'STATIC' + | 0 + /** + * Refer to the :ref:`strict DNS discovery + * type` + * for an explanation. + */ + | 'STRICT_DNS' + | 1 + /** + * Refer to the :ref:`logical DNS discovery + * type` + * for an explanation. 
+ */ + | 'LOGICAL_DNS' + | 2 + /** + * Refer to the :ref:`service discovery type` + * for an explanation. + */ + | 'EDS' + | 3 + /** + * Refer to the :ref:`original destination discovery + * type` + * for an explanation. + */ + | 'ORIGINAL_DST' + | 4 + +/** + * Refer to :ref:`service discovery type ` + * for an explanation on each type. + */ +export type _envoy_config_cluster_v3_Cluster_DiscoveryType__Output = typeof _envoy_config_cluster_v3_Cluster_DiscoveryType[keyof typeof _envoy_config_cluster_v3_Cluster_DiscoveryType] + +// Original file: deps/envoy-api/envoy/config/cluster/v3/cluster.proto + +/** + * When V4_ONLY is selected, the DNS resolver will only perform a lookup for + * addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will + * only perform a lookup for addresses in the IPv6 family. If AUTO is + * specified, the DNS resolver will first perform a lookup for addresses in + * the IPv6 family and fallback to a lookup for addresses in the IPv4 family. + * This is semantically equivalent to a non-existent V6_PREFERRED option. + * AUTO is a legacy name that is more opaque than + * necessary and will be deprecated in favor of V6_PREFERRED in a future major version of the API. + * If V4_PREFERRED is specified, the DNS resolver will first perform a lookup for addresses in the + * IPv4 family and fallback to a lookup for addresses in the IPv6 family. i.e., the callback + * target will only get v6 addresses if there were NO v4 addresses to return. + * If ALL is specified, the DNS resolver will perform a lookup for both IPv4 and IPv6 families, + * and return all resolved addresses. When this is used, Happy Eyeballs will be enabled for + * upstream connections. Refer to :ref:`Happy Eyeballs Support ` + * for more information. + * For cluster types other than + * :ref:`STRICT_DNS` and + * :ref:`LOGICAL_DNS`, + * this setting is + * ignored. + * [#next-major-version: deprecate AUTO in favor of a V6_PREFERRED option.] + */ +export const _envoy_config_cluster_v3_Cluster_DnsLookupFamily = { + AUTO: 'AUTO', + V4_ONLY: 'V4_ONLY', + V6_ONLY: 'V6_ONLY', + V4_PREFERRED: 'V4_PREFERRED', + ALL: 'ALL', +} as const; + +/** + * When V4_ONLY is selected, the DNS resolver will only perform a lookup for + * addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will + * only perform a lookup for addresses in the IPv6 family. If AUTO is + * specified, the DNS resolver will first perform a lookup for addresses in + * the IPv6 family and fallback to a lookup for addresses in the IPv4 family. + * This is semantically equivalent to a non-existent V6_PREFERRED option. + * AUTO is a legacy name that is more opaque than + * necessary and will be deprecated in favor of V6_PREFERRED in a future major version of the API. + * If V4_PREFERRED is specified, the DNS resolver will first perform a lookup for addresses in the + * IPv4 family and fallback to a lookup for addresses in the IPv6 family. i.e., the callback + * target will only get v6 addresses if there were NO v4 addresses to return. + * If ALL is specified, the DNS resolver will perform a lookup for both IPv4 and IPv6 families, + * and return all resolved addresses. When this is used, Happy Eyeballs will be enabled for + * upstream connections. Refer to :ref:`Happy Eyeballs Support ` + * for more information. + * For cluster types other than + * :ref:`STRICT_DNS` and + * :ref:`LOGICAL_DNS`, + * this setting is + * ignored. + * [#next-major-version: deprecate AUTO in favor of a V6_PREFERRED option.] 
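The generated enum pattern above is worth noting: the request-side type accepts either the string name or its numeric wire value, while the `__Output` type narrows to the string names only. For example (import path assumed):

import { _envoy_config_cluster_v3_Cluster_DiscoveryType } from './Cluster';
import type { _envoy_config_cluster_v3_Cluster_DiscoveryType__Output } from './Cluster';

// Both forms satisfy the request-side union type.
const byName: _envoy_config_cluster_v3_Cluster_DiscoveryType = 'EDS';
const byNumber: _envoy_config_cluster_v3_Cluster_DiscoveryType = 3; // numeric value of EDS
// Deserialized messages only ever carry the string names.
const received: _envoy_config_cluster_v3_Cluster_DiscoveryType__Output =
  _envoy_config_cluster_v3_Cluster_DiscoveryType.EDS;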
+ */ +export type _envoy_config_cluster_v3_Cluster_DnsLookupFamily = + | 'AUTO' + | 0 + | 'V4_ONLY' + | 1 + | 'V6_ONLY' + | 2 + | 'V4_PREFERRED' + | 3 + | 'ALL' + | 4 + +/** + * When V4_ONLY is selected, the DNS resolver will only perform a lookup for + * addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will + * only perform a lookup for addresses in the IPv6 family. If AUTO is + * specified, the DNS resolver will first perform a lookup for addresses in + * the IPv6 family and fallback to a lookup for addresses in the IPv4 family. + * This is semantically equivalent to a non-existent V6_PREFERRED option. + * AUTO is a legacy name that is more opaque than + * necessary and will be deprecated in favor of V6_PREFERRED in a future major version of the API. + * If V4_PREFERRED is specified, the DNS resolver will first perform a lookup for addresses in the + * IPv4 family and fallback to a lookup for addresses in the IPv6 family. i.e., the callback + * target will only get v6 addresses if there were NO v4 addresses to return. + * If ALL is specified, the DNS resolver will perform a lookup for both IPv4 and IPv6 families, + * and return all resolved addresses. When this is used, Happy Eyeballs will be enabled for + * upstream connections. Refer to :ref:`Happy Eyeballs Support ` + * for more information. + * For cluster types other than + * :ref:`STRICT_DNS` and + * :ref:`LOGICAL_DNS`, + * this setting is + * ignored. + * [#next-major-version: deprecate AUTO in favor of a V6_PREFERRED option.] + */ +export type _envoy_config_cluster_v3_Cluster_DnsLookupFamily__Output = typeof _envoy_config_cluster_v3_Cluster_DnsLookupFamily[keyof typeof _envoy_config_cluster_v3_Cluster_DnsLookupFamily] + +/** + * Only valid when discovery type is EDS. + */ +export interface _envoy_config_cluster_v3_Cluster_EdsClusterConfig { + /** + * Configuration for the source of EDS updates for this Cluster. + */ + 'eds_config'?: (_envoy_config_core_v3_ConfigSource | null); + /** + * Optional alternative to cluster name to present to EDS. This does not + * have the same restrictions as cluster name, i.e. it may be arbitrary + * length. This may be a xdstp:// URL. + */ + 'service_name'?: (string); +} + +/** + * Only valid when discovery type is EDS. + */ +export interface _envoy_config_cluster_v3_Cluster_EdsClusterConfig__Output { + /** + * Configuration for the source of EDS updates for this Cluster. + */ + 'eds_config': (_envoy_config_core_v3_ConfigSource__Output | null); + /** + * Optional alternative to cluster name to present to EDS. This does not + * have the same restrictions as cluster name, i.e. it may be arbitrary + * length. This may be a xdstp:// URL. + */ + 'service_name': (string); +} + +// Original file: deps/envoy-api/envoy/config/cluster/v3/cluster.proto + +/** + * The hash function used to hash hosts onto the ketama ring. + */ +export const _envoy_config_cluster_v3_Cluster_RingHashLbConfig_HashFunction = { + /** + * Use `xxHash `_, this is the default hash function. + */ + XX_HASH: 'XX_HASH', + /** + * Use `MurmurHash2 `_, this is compatible with + * std:hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled + * on Linux and not macOS. + */ + MURMUR_HASH_2: 'MURMUR_HASH_2', +} as const; + +/** + * The hash function used to hash hosts onto the ketama ring. + */ +export type _envoy_config_cluster_v3_Cluster_RingHashLbConfig_HashFunction = + /** + * Use `xxHash `_, this is the default hash function. 
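A sketch of the `EdsClusterConfig` message above. `ConfigSource` is defined in a separate generated file, so the `ads: {}` source used here is an assumption about that type rather than something shown in this diff, and the service name is hypothetical:

import type { _envoy_config_cluster_v3_Cluster_EdsClusterConfig } from './Cluster';

const edsClusterConfig: _envoy_config_cluster_v3_Cluster_EdsClusterConfig = {
  eds_config: { ads: {} },                  // assumed ConfigSource field: use the aggregated (ADS) stream
  service_name: 'example-backend-service',  // hypothetical; may also be an xdstp:// URL
};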
+ */ + | 'XX_HASH' + | 0 + /** + * Use `MurmurHash2 `_, this is compatible with + * std:hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled + * on Linux and not macOS. + */ + | 'MURMUR_HASH_2' + | 1 + +/** + * The hash function used to hash hosts onto the ketama ring. + */ +export type _envoy_config_cluster_v3_Cluster_RingHashLbConfig_HashFunction__Output = typeof _envoy_config_cluster_v3_Cluster_RingHashLbConfig_HashFunction[keyof typeof _envoy_config_cluster_v3_Cluster_RingHashLbConfig_HashFunction] + +// Original file: deps/envoy-api/envoy/config/cluster/v3/cluster.proto + +/** + * Refer to :ref:`load balancer type ` architecture + * overview section for information on each type. + */ +export const _envoy_config_cluster_v3_Cluster_LbPolicy = { + /** + * Refer to the :ref:`round robin load balancing + * policy` + * for an explanation. + */ + ROUND_ROBIN: 'ROUND_ROBIN', + /** + * Refer to the :ref:`least request load balancing + * policy` + * for an explanation. + */ + LEAST_REQUEST: 'LEAST_REQUEST', + /** + * Refer to the :ref:`ring hash load balancing + * policy` + * for an explanation. + */ + RING_HASH: 'RING_HASH', + /** + * Refer to the :ref:`random load balancing + * policy` + * for an explanation. + */ + RANDOM: 'RANDOM', + /** + * Refer to the :ref:`Maglev load balancing policy` + * for an explanation. + */ + MAGLEV: 'MAGLEV', + /** + * This load balancer type must be specified if the configured cluster provides a cluster + * specific load balancer. Consult the configured cluster's documentation for whether to set + * this option or not. + */ + CLUSTER_PROVIDED: 'CLUSTER_PROVIDED', + /** + * Use the new :ref:`load_balancing_policy + * ` field to determine the LB policy. + * This has been deprecated in favor of using the :ref:`load_balancing_policy + * ` field without + * setting any value in :ref:`lb_policy`. + */ + LOAD_BALANCING_POLICY_CONFIG: 'LOAD_BALANCING_POLICY_CONFIG', +} as const; + +/** + * Refer to :ref:`load balancer type ` architecture + * overview section for information on each type. + */ +export type _envoy_config_cluster_v3_Cluster_LbPolicy = + /** + * Refer to the :ref:`round robin load balancing + * policy` + * for an explanation. + */ + | 'ROUND_ROBIN' + | 0 + /** + * Refer to the :ref:`least request load balancing + * policy` + * for an explanation. + */ + | 'LEAST_REQUEST' + | 1 + /** + * Refer to the :ref:`ring hash load balancing + * policy` + * for an explanation. + */ + | 'RING_HASH' + | 2 + /** + * Refer to the :ref:`random load balancing + * policy` + * for an explanation. + */ + | 'RANDOM' + | 3 + /** + * Refer to the :ref:`Maglev load balancing policy` + * for an explanation. + */ + | 'MAGLEV' + | 5 + /** + * This load balancer type must be specified if the configured cluster provides a cluster + * specific load balancer. Consult the configured cluster's documentation for whether to set + * this option or not. + */ + | 'CLUSTER_PROVIDED' + | 6 + /** + * Use the new :ref:`load_balancing_policy + * ` field to determine the LB policy. + * This has been deprecated in favor of using the :ref:`load_balancing_policy + * ` field without + * setting any value in :ref:`lb_policy`. + */ + | 'LOAD_BALANCING_POLICY_CONFIG' + | 7 + +/** + * Refer to :ref:`load balancer type ` architecture + * overview section for information on each type. 
+ */ +export type _envoy_config_cluster_v3_Cluster_LbPolicy__Output = typeof _envoy_config_cluster_v3_Cluster_LbPolicy[keyof typeof _envoy_config_cluster_v3_Cluster_LbPolicy] + +/** + * Optionally divide the endpoints in this cluster into subsets defined by + * endpoint metadata and selected by route and weighted cluster metadata. + * [#next-free-field: 9] + */ +export interface _envoy_config_cluster_v3_Cluster_LbSubsetConfig { + /** + * The behavior used when no endpoint subset matches the selected route's + * metadata. The value defaults to + * :ref:`NO_FALLBACK`. + */ + 'fallback_policy'?: (_envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetFallbackPolicy); + /** + * Specifies the default subset of endpoints used during fallback if + * fallback_policy is + * :ref:`DEFAULT_SUBSET`. + * Each field in default_subset is + * compared to the matching LbEndpoint.Metadata under the ``envoy.lb`` + * namespace. It is valid for no hosts to match, in which case the behavior + * is the same as a fallback_policy of + * :ref:`NO_FALLBACK`. + */ + 'default_subset'?: (_google_protobuf_Struct | null); + /** + * For each entry, LbEndpoint.Metadata's + * ``envoy.lb`` namespace is traversed and a subset is created for each unique + * combination of key and value. For example: + * + * .. code-block:: json + * + * { "subset_selectors": [ + * { "keys": [ "version" ] }, + * { "keys": [ "stage", "hardware_type" ] } + * ]} + * + * A subset is matched when the metadata from the selected route and + * weighted cluster contains the same keys and values as the subset's + * metadata. The same host may appear in multiple subsets. + */ + 'subset_selectors'?: (_envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetSelector)[]; + /** + * If true, routing to subsets will take into account the localities and locality weights of the + * endpoints when making the routing decision. + * + * There are some potential pitfalls associated with enabling this feature, as the resulting + * traffic split after applying both a subset match and locality weights might be undesirable. + * + * Consider for example a situation in which you have 50/50 split across two localities X/Y + * which have 100 hosts each without subsetting. If the subset LB results in X having only 1 + * host selected but Y having 100, then a lot more load is being dumped on the single host in X + * than originally anticipated in the load balancing assignment delivered via EDS. + */ + 'locality_weight_aware'?: (boolean); + /** + * When used with locality_weight_aware, scales the weight of each locality by the ratio + * of hosts in the subset vs hosts in the original subset. This aims to even out the load + * going to an individual locality if said locality is disproportionately affected by the + * subset predicate. + */ + 'scale_locality_weight'?: (boolean); + /** + * If true, when a fallback policy is configured and its corresponding subset fails to find + * a host this will cause any host to be selected instead. + * + * This is useful when using the default subset as the fallback policy, given the default + * subset might become empty. With this option enabled, if that happens the LB will attempt + * to select a host from the entire cluster. + */ + 'panic_mode_any'?: (boolean); + /** + * If true, metadata specified for a metadata key will be matched against the corresponding + * endpoint metadata if the endpoint metadata matches the value exactly OR it is a list value + * and any of the elements in the list matches the criteria. 
+ */ + 'list_as_any'?: (boolean); + /** + * Fallback mechanism that allows to try different route metadata until a host is found. + * If load balancing process, including all its mechanisms (like + * :ref:`fallback_policy`) + * fails to select a host, this policy decides if and how the process is repeated using another metadata. + * + * The value defaults to + * :ref:`METADATA_NO_FALLBACK`. + */ + 'metadata_fallback_policy'?: (_envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy); +} + +/** + * Optionally divide the endpoints in this cluster into subsets defined by + * endpoint metadata and selected by route and weighted cluster metadata. + * [#next-free-field: 9] + */ +export interface _envoy_config_cluster_v3_Cluster_LbSubsetConfig__Output { + /** + * The behavior used when no endpoint subset matches the selected route's + * metadata. The value defaults to + * :ref:`NO_FALLBACK`. + */ + 'fallback_policy': (_envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetFallbackPolicy__Output); + /** + * Specifies the default subset of endpoints used during fallback if + * fallback_policy is + * :ref:`DEFAULT_SUBSET`. + * Each field in default_subset is + * compared to the matching LbEndpoint.Metadata under the ``envoy.lb`` + * namespace. It is valid for no hosts to match, in which case the behavior + * is the same as a fallback_policy of + * :ref:`NO_FALLBACK`. + */ + 'default_subset': (_google_protobuf_Struct__Output | null); + /** + * For each entry, LbEndpoint.Metadata's + * ``envoy.lb`` namespace is traversed and a subset is created for each unique + * combination of key and value. For example: + * + * .. code-block:: json + * + * { "subset_selectors": [ + * { "keys": [ "version" ] }, + * { "keys": [ "stage", "hardware_type" ] } + * ]} + * + * A subset is matched when the metadata from the selected route and + * weighted cluster contains the same keys and values as the subset's + * metadata. The same host may appear in multiple subsets. + */ + 'subset_selectors': (_envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetSelector__Output)[]; + /** + * If true, routing to subsets will take into account the localities and locality weights of the + * endpoints when making the routing decision. + * + * There are some potential pitfalls associated with enabling this feature, as the resulting + * traffic split after applying both a subset match and locality weights might be undesirable. + * + * Consider for example a situation in which you have 50/50 split across two localities X/Y + * which have 100 hosts each without subsetting. If the subset LB results in X having only 1 + * host selected but Y having 100, then a lot more load is being dumped on the single host in X + * than originally anticipated in the load balancing assignment delivered via EDS. + */ + 'locality_weight_aware': (boolean); + /** + * When used with locality_weight_aware, scales the weight of each locality by the ratio + * of hosts in the subset vs hosts in the original subset. This aims to even out the load + * going to an individual locality if said locality is disproportionately affected by the + * subset predicate. + */ + 'scale_locality_weight': (boolean); + /** + * If true, when a fallback policy is configured and its corresponding subset fails to find + * a host this will cause any host to be selected instead. + * + * This is useful when using the default subset as the fallback policy, given the default + * subset might become empty. 
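A sketch of the `LbSubsetConfig` above, mirroring the `subset_selectors` JSON example quoted in its comment (import path assumed):

import type { _envoy_config_cluster_v3_Cluster_LbSubsetConfig } from './Cluster';

const lbSubsetConfig: _envoy_config_cluster_v3_Cluster_LbSubsetConfig = {
  fallback_policy: 'ANY_ENDPOINT',          // use any healthy endpoint when no subset matches
  subset_selectors: [
    { keys: ['version'] },                  // one subset per distinct "version" value
    { keys: ['stage', 'hardware_type'] },   // one subset per ("stage", "hardware_type") pair
  ],
};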
With this option enabled, if that happens the LB will attempt + * to select a host from the entire cluster. + */ + 'panic_mode_any': (boolean); + /** + * If true, metadata specified for a metadata key will be matched against the corresponding + * endpoint metadata if the endpoint metadata matches the value exactly OR it is a list value + * and any of the elements in the list matches the criteria. + */ + 'list_as_any': (boolean); + /** + * Fallback mechanism that allows to try different route metadata until a host is found. + * If load balancing process, including all its mechanisms (like + * :ref:`fallback_policy`) + * fails to select a host, this policy decides if and how the process is repeated using another metadata. + * + * The value defaults to + * :ref:`METADATA_NO_FALLBACK`. + */ + 'metadata_fallback_policy': (_envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy__Output); +} + +// Original file: deps/envoy-api/envoy/config/cluster/v3/cluster.proto + +/** + * If NO_FALLBACK is selected, a result + * equivalent to no healthy hosts is reported. If ANY_ENDPOINT is selected, + * any cluster endpoint may be returned (subject to policy, health checks, + * etc). If DEFAULT_SUBSET is selected, load balancing is performed over the + * endpoints matching the values from the default_subset field. + */ +export const _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetFallbackPolicy = { + NO_FALLBACK: 'NO_FALLBACK', + ANY_ENDPOINT: 'ANY_ENDPOINT', + DEFAULT_SUBSET: 'DEFAULT_SUBSET', +} as const; + +/** + * If NO_FALLBACK is selected, a result + * equivalent to no healthy hosts is reported. If ANY_ENDPOINT is selected, + * any cluster endpoint may be returned (subject to policy, health checks, + * etc). If DEFAULT_SUBSET is selected, load balancing is performed over the + * endpoints matching the values from the default_subset field. + */ +export type _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetFallbackPolicy = + | 'NO_FALLBACK' + | 0 + | 'ANY_ENDPOINT' + | 1 + | 'DEFAULT_SUBSET' + | 2 + +/** + * If NO_FALLBACK is selected, a result + * equivalent to no healthy hosts is reported. If ANY_ENDPOINT is selected, + * any cluster endpoint may be returned (subject to policy, health checks, + * etc). If DEFAULT_SUBSET is selected, load balancing is performed over the + * endpoints matching the values from the default_subset field. + */ +export type _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetFallbackPolicy__Output = typeof _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetFallbackPolicy[keyof typeof _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetFallbackPolicy] + +// Original file: deps/envoy-api/envoy/config/cluster/v3/cluster.proto + +export const _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy = { + /** + * No fallback. Route metadata will be used as-is. + */ + METADATA_NO_FALLBACK: 'METADATA_NO_FALLBACK', + /** + * A special metadata key ``fallback_list`` will be used to provide variants of metadata to try. + * Value of ``fallback_list`` key has to be a list. Every list element has to be a struct - it will + * be merged with route metadata, overriding keys that appear in both places. + * ``fallback_list`` entries will be used in order until a host is found. + * + * ``fallback_list`` key itself is removed from metadata before subset load balancing is performed. + * + * Example: + * + * for metadata: + * + * .. 
code-block:: yaml + * + * version: 1.0 + * fallback_list: + * - version: 2.0 + * hardware: c64 + * - hardware: c32 + * - version: 3.0 + * + * at first, metadata: + * + * .. code-block:: json + * + * {"version": "2.0", "hardware": "c64"} + * + * will be used for load balancing. If no host is found, metadata: + * + * .. code-block:: json + * + * {"version": "1.0", "hardware": "c32"} + * + * is next to try. If it still results in no host, finally metadata: + * + * .. code-block:: json + * + * {"version": "3.0"} + * + * is used. + */ + FALLBACK_LIST: 'FALLBACK_LIST', +} as const; + +export type _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy = + /** + * No fallback. Route metadata will be used as-is. + */ + | 'METADATA_NO_FALLBACK' + | 0 + /** + * A special metadata key ``fallback_list`` will be used to provide variants of metadata to try. + * Value of ``fallback_list`` key has to be a list. Every list element has to be a struct - it will + * be merged with route metadata, overriding keys that appear in both places. + * ``fallback_list`` entries will be used in order until a host is found. + * + * ``fallback_list`` key itself is removed from metadata before subset load balancing is performed. + * + * Example: + * + * for metadata: + * + * .. code-block:: yaml + * + * version: 1.0 + * fallback_list: + * - version: 2.0 + * hardware: c64 + * - hardware: c32 + * - version: 3.0 + * + * at first, metadata: + * + * .. code-block:: json + * + * {"version": "2.0", "hardware": "c64"} + * + * will be used for load balancing. If no host is found, metadata: + * + * .. code-block:: json + * + * {"version": "1.0", "hardware": "c32"} + * + * is next to try. If it still results in no host, finally metadata: + * + * .. code-block:: json + * + * {"version": "3.0"} + * + * is used. + */ + | 'FALLBACK_LIST' + | 1 + +export type _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy__Output = typeof _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy[keyof typeof _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy] + +/** + * Specifications for subsets. + */ +export interface _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetSelector { + /** + * List of keys to match with the weighted cluster metadata. + */ + 'keys'?: (string)[]; + /** + * Selects a mode of operation in which each subset has only one host. This mode uses the same rules for + * choosing a host, but updating hosts is faster, especially for large numbers of hosts. + * + * If a match is found to a host, that host will be used regardless of priority levels. + * + * When this mode is enabled, configurations that contain more than one host with the same metadata value for the single key in ``keys`` + * will use only one of the hosts with the given key; no requests will be routed to the others. The cluster gauge + * :ref:`lb_subsets_single_host_per_subset_duplicate` indicates how many duplicates are + * present in the current configuration. + */ + 'single_host_per_subset'?: (boolean); + /** + * The behavior used when no endpoint subset matches the selected route's + * metadata. + */ + 'fallback_policy'?: (_envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy); + /** + * Subset of + * :ref:`keys` used by + * :ref:`KEYS_SUBSET` + * fallback policy. + * It has to be a non empty list if KEYS_SUBSET fallback policy is selected. 
+ * For any other fallback policy the parameter is not used and should not be set. + * Only values also present in + * :ref:`keys` are allowed, but + * ``fallback_keys_subset`` cannot be equal to ``keys``. + */ + 'fallback_keys_subset'?: (string)[]; +} + +/** + * Specifications for subsets. + */ +export interface _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetSelector__Output { + /** + * List of keys to match with the weighted cluster metadata. + */ + 'keys': (string)[]; + /** + * Selects a mode of operation in which each subset has only one host. This mode uses the same rules for + * choosing a host, but updating hosts is faster, especially for large numbers of hosts. + * + * If a match is found to a host, that host will be used regardless of priority levels. + * + * When this mode is enabled, configurations that contain more than one host with the same metadata value for the single key in ``keys`` + * will use only one of the hosts with the given key; no requests will be routed to the others. The cluster gauge + * :ref:`lb_subsets_single_host_per_subset_duplicate` indicates how many duplicates are + * present in the current configuration. + */ + 'single_host_per_subset': (boolean); + /** + * The behavior used when no endpoint subset matches the selected route's + * metadata. + */ + 'fallback_policy': (_envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy__Output); + /** + * Subset of + * :ref:`keys` used by + * :ref:`KEYS_SUBSET` + * fallback policy. + * It has to be a non empty list if KEYS_SUBSET fallback policy is selected. + * For any other fallback policy the parameter is not used and should not be set. + * Only values also present in + * :ref:`keys` are allowed, but + * ``fallback_keys_subset`` cannot be equal to ``keys``. + */ + 'fallback_keys_subset': (string)[]; +} + +// Original file: deps/envoy-api/envoy/config/cluster/v3/cluster.proto + +/** + * Allows to override top level fallback policy per selector. + */ +export const _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy = { + /** + * If NOT_DEFINED top level config fallback policy is used instead. + */ + NOT_DEFINED: 'NOT_DEFINED', + /** + * If NO_FALLBACK is selected, a result equivalent to no healthy hosts is reported. + */ + NO_FALLBACK: 'NO_FALLBACK', + /** + * If ANY_ENDPOINT is selected, any cluster endpoint may be returned + * (subject to policy, health checks, etc). + */ + ANY_ENDPOINT: 'ANY_ENDPOINT', + /** + * If DEFAULT_SUBSET is selected, load balancing is performed over the + * endpoints matching the values from the default_subset field. + */ + DEFAULT_SUBSET: 'DEFAULT_SUBSET', + /** + * If KEYS_SUBSET is selected, subset selector matching is performed again with metadata + * keys reduced to + * :ref:`fallback_keys_subset`. + * It allows for a fallback to a different, less specific selector if some of the keys of + * the selector are considered optional. + */ + KEYS_SUBSET: 'KEYS_SUBSET', +} as const; + +/** + * Allows to override top level fallback policy per selector. + */ +export type _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy = + /** + * If NOT_DEFINED top level config fallback policy is used instead. + */ + | 'NOT_DEFINED' + | 0 + /** + * If NO_FALLBACK is selected, a result equivalent to no healthy hosts is reported. 
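A sketch of a per-selector fallback using the `LbSubsetSelector` type above; per the field comments, `fallback_keys_subset` must be a non-empty subset of `keys` and cannot equal it:

import type { _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetSelector } from './Cluster';

const selector: _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetSelector = {
  keys: ['stage', 'hardware_type'],
  fallback_policy: 'KEYS_SUBSET',     // retry the match with fewer keys before giving up
  fallback_keys_subset: ['stage'],    // strict, non-empty subset of `keys`
};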
+ */ + | 'NO_FALLBACK' + | 1 + /** + * If ANY_ENDPOINT is selected, any cluster endpoint may be returned + * (subject to policy, health checks, etc). + */ + | 'ANY_ENDPOINT' + | 2 + /** + * If DEFAULT_SUBSET is selected, load balancing is performed over the + * endpoints matching the values from the default_subset field. + */ + | 'DEFAULT_SUBSET' + | 3 + /** + * If KEYS_SUBSET is selected, subset selector matching is performed again with metadata + * keys reduced to + * :ref:`fallback_keys_subset`. + * It allows for a fallback to a different, less specific selector if some of the keys of + * the selector are considered optional. + */ + | 'KEYS_SUBSET' + | 4 + +/** + * Allows to override top level fallback policy per selector. + */ +export type _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy__Output = typeof _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy[keyof typeof _envoy_config_cluster_v3_Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy] + +/** + * Specific configuration for the LeastRequest load balancing policy. + */ +export interface _envoy_config_cluster_v3_Cluster_LeastRequestLbConfig { + /** + * The number of random healthy hosts from which the host with the fewest active requests will + * be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. + */ + 'choice_count'?: (_google_protobuf_UInt32Value | null); + /** + * The following formula is used to calculate the dynamic weights when hosts have different load + * balancing weights: + * + * ``weight = load_balancing_weight / (active_requests + 1)^active_request_bias`` + * + * The larger the active request bias is, the more aggressively active requests will lower the + * effective weight when all host weights are not equal. + * + * ``active_request_bias`` must be greater than or equal to 0.0. + * + * When ``active_request_bias == 0.0`` the Least Request Load Balancer doesn't consider the number + * of active requests at the time it picks a host and behaves like the Round Robin Load + * Balancer. + * + * When ``active_request_bias > 0.0`` the Least Request Load Balancer scales the load balancing + * weight by the number of active requests at the time it does a pick. + * + * The value is cached for performance reasons and refreshed whenever one of the Load Balancer's + * host sets changes, e.g., whenever there is a host membership update or a host load balancing + * weight change. + * + * .. note:: + * This setting only takes effect if all host weights are not equal. + */ + 'active_request_bias'?: (_envoy_config_core_v3_RuntimeDouble | null); + /** + * Configuration for slow start mode. + * If this configuration is not set, slow start will not be not enabled. + */ + 'slow_start_config'?: (_envoy_config_cluster_v3_Cluster_SlowStartConfig | null); +} + +/** + * Specific configuration for the LeastRequest load balancing policy. + */ +export interface _envoy_config_cluster_v3_Cluster_LeastRequestLbConfig__Output { + /** + * The number of random healthy hosts from which the host with the fewest active requests will + * be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. 
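A worked example of the dynamic-weight formula quoted above, as plain arithmetic with illustrative values:

// weight = load_balancing_weight / (active_requests + 1) ^ active_request_bias
const loadBalancingWeight = 100;  // configured host weight
const activeRequests = 3;         // requests currently in flight to this host
const activeRequestBias = 1.0;    // a bias of 0.0 would ignore active requests entirely
const effectiveWeight = loadBalancingWeight / Math.pow(activeRequests + 1, activeRequestBias);
// effectiveWeight === 25: four outstanding requests cut this host's effective weight to a quarter.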
+ */ + 'choice_count': (_google_protobuf_UInt32Value__Output | null); + /** + * The following formula is used to calculate the dynamic weights when hosts have different load + * balancing weights: + * + * ``weight = load_balancing_weight / (active_requests + 1)^active_request_bias`` + * + * The larger the active request bias is, the more aggressively active requests will lower the + * effective weight when all host weights are not equal. + * + * ``active_request_bias`` must be greater than or equal to 0.0. + * + * When ``active_request_bias == 0.0`` the Least Request Load Balancer doesn't consider the number + * of active requests at the time it picks a host and behaves like the Round Robin Load + * Balancer. + * + * When ``active_request_bias > 0.0`` the Least Request Load Balancer scales the load balancing + * weight by the number of active requests at the time it does a pick. + * + * The value is cached for performance reasons and refreshed whenever one of the Load Balancer's + * host sets changes, e.g., whenever there is a host membership update or a host load balancing + * weight change. + * + * .. note:: + * This setting only takes effect if all host weights are not equal. + */ + 'active_request_bias': (_envoy_config_core_v3_RuntimeDouble__Output | null); + /** + * Configuration for slow start mode. + * If this configuration is not set, slow start will not be not enabled. + */ + 'slow_start_config': (_envoy_config_cluster_v3_Cluster_SlowStartConfig__Output | null); +} + +/** + * Configuration for :ref:`locality weighted load balancing + * ` + */ +export interface _envoy_config_cluster_v3_Cluster_CommonLbConfig_LocalityWeightedLbConfig { +} + +/** + * Configuration for :ref:`locality weighted load balancing + * ` + */ +export interface _envoy_config_cluster_v3_Cluster_CommonLbConfig_LocalityWeightedLbConfig__Output { +} + +/** + * Specific configuration for the :ref:`Maglev` + * load balancing policy. + */ +export interface _envoy_config_cluster_v3_Cluster_MaglevLbConfig { + /** + * The table size for Maglev hashing. Maglev aims for "minimal disruption" rather than an absolute guarantee. + * Minimal disruption means that when the set of upstream hosts change, a connection will likely be sent to the same + * upstream as it was before. Increasing the table size reduces the amount of disruption. + * The table size must be prime number limited to 5000011. If it is not specified, the default is 65537. + */ + 'table_size'?: (_google_protobuf_UInt64Value | null); +} + +/** + * Specific configuration for the :ref:`Maglev` + * load balancing policy. + */ +export interface _envoy_config_cluster_v3_Cluster_MaglevLbConfig__Output { + /** + * The table size for Maglev hashing. Maglev aims for "minimal disruption" rather than an absolute guarantee. + * Minimal disruption means that when the set of upstream hosts change, a connection will likely be sent to the same + * upstream as it was before. Increasing the table size reduces the amount of disruption. + * The table size must be prime number limited to 5000011. If it is not specified, the default is 65537. + */ + 'table_size': (_google_protobuf_UInt64Value__Output | null); +} + +/** + * Specific configuration for the + * :ref:`Original Destination ` + * load balancing policy. + * [#extension: envoy.clusters.original_dst] + */ +export interface _envoy_config_cluster_v3_Cluster_OriginalDstLbConfig { + /** + * When true, a HTTP header can be used to override the original dst address. The default header is + * :ref:`x-envoy-original-dst-host `. 
+ * + * .. attention:: + * + * This header isn't sanitized by default, so enabling this feature allows HTTP clients to + * route traffic to arbitrary hosts and/or ports, which may have serious security + * consequences. + * + * .. note:: + * + * If the header appears multiple times only the first value is used. + */ + 'use_http_header'?: (boolean); + /** + * The http header to override destination address if :ref:`use_http_header `. + * is set to true. If the value is empty, :ref:`x-envoy-original-dst-host ` will be used. + */ + 'http_header_name'?: (string); + /** + * The port to override for the original dst address. This port + * will take precedence over filter state and header override ports + */ + 'upstream_port_override'?: (_google_protobuf_UInt32Value | null); +} + +/** + * Specific configuration for the + * :ref:`Original Destination ` + * load balancing policy. + * [#extension: envoy.clusters.original_dst] + */ +export interface _envoy_config_cluster_v3_Cluster_OriginalDstLbConfig__Output { + /** + * When true, a HTTP header can be used to override the original dst address. The default header is + * :ref:`x-envoy-original-dst-host `. + * + * .. attention:: + * + * This header isn't sanitized by default, so enabling this feature allows HTTP clients to + * route traffic to arbitrary hosts and/or ports, which may have serious security + * consequences. + * + * .. note:: + * + * If the header appears multiple times only the first value is used. + */ + 'use_http_header': (boolean); + /** + * The http header to override destination address if :ref:`use_http_header `. + * is set to true. If the value is empty, :ref:`x-envoy-original-dst-host ` will be used. + */ + 'http_header_name': (string); + /** + * The port to override for the original dst address. This port + * will take precedence over filter state and header override ports + */ + 'upstream_port_override': (_google_protobuf_UInt32Value__Output | null); +} + +export interface _envoy_config_cluster_v3_Cluster_PreconnectPolicy { + /** + * Indicates how many streams (rounded up) can be anticipated per-upstream for each + * incoming stream. This is useful for high-QPS or latency-sensitive services. Preconnecting + * will only be done if the upstream is healthy and the cluster has traffic. + * + * For example if this is 2, for an incoming HTTP/1.1 stream, 2 connections will be + * established, one for the new incoming stream, and one for a presumed follow-up stream. For + * HTTP/2, only one connection would be established by default as one connection can + * serve both the original and presumed follow-up stream. + * + * In steady state for non-multiplexed connections a value of 1.5 would mean if there were 100 + * active streams, there would be 100 connections in use, and 50 connections preconnected. + * This might be a useful value for something like short lived single-use connections, + * for example proxying HTTP/1.1 if keep-alive were false and each stream resulted in connection + * termination. It would likely be overkill for long lived connections, such as TCP proxying SMTP + * or regular HTTP/1.1 with keep-alive. For long lived traffic, a value of 1.05 would be more + * reasonable, where for every 100 connections, 5 preconnected connections would be in the queue + * in case of unexpected disconnects where the connection could not be reused. + * + * If this value is not set, or set explicitly to one, Envoy will fetch as many connections + * as needed to serve streams in flight. 
This means in steady state if a connection is torn down, + * a subsequent streams will pay an upstream-rtt latency penalty waiting for a new connection. + * + * This is limited somewhat arbitrarily to 3 because preconnecting too aggressively can + * harm latency more than the preconnecting helps. + */ + 'per_upstream_preconnect_ratio'?: (_google_protobuf_DoubleValue | null); + /** + * Indicates how many streams (rounded up) can be anticipated across a cluster for each + * stream, useful for low QPS services. This is currently supported for a subset of + * deterministic non-hash-based load-balancing algorithms (weighted round robin, random). + * Unlike ``per_upstream_preconnect_ratio`` this preconnects across the upstream instances in a + * cluster, doing best effort predictions of what upstream would be picked next and + * pre-establishing a connection. + * + * Preconnecting will be limited to one preconnect per configured upstream in the cluster and will + * only be done if there are healthy upstreams and the cluster has traffic. + * + * For example if preconnecting is set to 2 for a round robin HTTP/2 cluster, on the first + * incoming stream, 2 connections will be preconnected - one to the first upstream for this + * cluster, one to the second on the assumption there will be a follow-up stream. + * + * If this value is not set, or set explicitly to one, Envoy will fetch as many connections + * as needed to serve streams in flight, so during warm up and in steady state if a connection + * is closed (and per_upstream_preconnect_ratio is not set), there will be a latency hit for + * connection establishment. + * + * If both this and preconnect_ratio are set, Envoy will make sure both predicted needs are met, + * basically preconnecting max(predictive-preconnect, per-upstream-preconnect), for each + * upstream. + */ + 'predictive_preconnect_ratio'?: (_google_protobuf_DoubleValue | null); +} + +export interface _envoy_config_cluster_v3_Cluster_PreconnectPolicy__Output { + /** + * Indicates how many streams (rounded up) can be anticipated per-upstream for each + * incoming stream. This is useful for high-QPS or latency-sensitive services. Preconnecting + * will only be done if the upstream is healthy and the cluster has traffic. + * + * For example if this is 2, for an incoming HTTP/1.1 stream, 2 connections will be + * established, one for the new incoming stream, and one for a presumed follow-up stream. For + * HTTP/2, only one connection would be established by default as one connection can + * serve both the original and presumed follow-up stream. + * + * In steady state for non-multiplexed connections a value of 1.5 would mean if there were 100 + * active streams, there would be 100 connections in use, and 50 connections preconnected. + * This might be a useful value for something like short lived single-use connections, + * for example proxying HTTP/1.1 if keep-alive were false and each stream resulted in connection + * termination. It would likely be overkill for long lived connections, such as TCP proxying SMTP + * or regular HTTP/1.1 with keep-alive. For long lived traffic, a value of 1.05 would be more + * reasonable, where for every 100 connections, 5 preconnected connections would be in the queue + * in case of unexpected disconnects where the connection could not be reused. + * + * If this value is not set, or set explicitly to one, Envoy will fetch as many connections + * as needed to serve streams in flight. 
This means in steady state if a connection is torn down, + * a subsequent streams will pay an upstream-rtt latency penalty waiting for a new connection. + * + * This is limited somewhat arbitrarily to 3 because preconnecting too aggressively can + * harm latency more than the preconnecting helps. + */ + 'per_upstream_preconnect_ratio': (_google_protobuf_DoubleValue__Output | null); + /** + * Indicates how many streams (rounded up) can be anticipated across a cluster for each + * stream, useful for low QPS services. This is currently supported for a subset of + * deterministic non-hash-based load-balancing algorithms (weighted round robin, random). + * Unlike ``per_upstream_preconnect_ratio`` this preconnects across the upstream instances in a + * cluster, doing best effort predictions of what upstream would be picked next and + * pre-establishing a connection. + * + * Preconnecting will be limited to one preconnect per configured upstream in the cluster and will + * only be done if there are healthy upstreams and the cluster has traffic. + * + * For example if preconnecting is set to 2 for a round robin HTTP/2 cluster, on the first + * incoming stream, 2 connections will be preconnected - one to the first upstream for this + * cluster, one to the second on the assumption there will be a follow-up stream. + * + * If this value is not set, or set explicitly to one, Envoy will fetch as many connections + * as needed to serve streams in flight, so during warm up and in steady state if a connection + * is closed (and per_upstream_preconnect_ratio is not set), there will be a latency hit for + * connection establishment. + * + * If both this and preconnect_ratio are set, Envoy will make sure both predicted needs are met, + * basically preconnecting max(predictive-preconnect, per-upstream-preconnect), for each + * upstream. + */ + 'predictive_preconnect_ratio': (_google_protobuf_DoubleValue__Output | null); +} + +export interface _envoy_config_cluster_v3_Cluster_RefreshRate { + /** + * Specifies the base interval between refreshes. This parameter is required and must be greater + * than zero and less than + * :ref:`max_interval `. + */ + 'base_interval'?: (_google_protobuf_Duration | null); + /** + * Specifies the maximum interval between refreshes. This parameter is optional, but must be + * greater than or equal to the + * :ref:`base_interval ` if set. The default + * is 10 times the :ref:`base_interval `. + */ + 'max_interval'?: (_google_protobuf_Duration | null); +} + +export interface _envoy_config_cluster_v3_Cluster_RefreshRate__Output { + /** + * Specifies the base interval between refreshes. This parameter is required and must be greater + * than zero and less than + * :ref:`max_interval `. + */ + 'base_interval': (_google_protobuf_Duration__Output | null); + /** + * Specifies the maximum interval between refreshes. This parameter is optional, but must be + * greater than or equal to the + * :ref:`base_interval ` if set. The default + * is 10 times the :ref:`base_interval `. + */ + 'max_interval': (_google_protobuf_Duration__Output | null); +} + +/** + * Specific configuration for the :ref:`RingHash` + * load balancing policy. + */ +export interface _envoy_config_cluster_v3_Cluster_RingHashLbConfig { + /** + * Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each + * provided host) the better the request distribution will reflect the desired weights. Defaults + * to 1024 entries, and limited to 8M entries. See also + * :ref:`maximum_ring_size`. 
+ */
+ 'minimum_ring_size'?: (_google_protobuf_UInt64Value | null);
+ /**
+ * The hash function used to hash hosts onto the ketama ring. The value defaults to
+ * :ref:`XX_HASH`.
+ */
+ 'hash_function'?: (_envoy_config_cluster_v3_Cluster_RingHashLbConfig_HashFunction);
+ /**
+ * Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered
+ * to further constrain resource use. See also
+ * :ref:`minimum_ring_size`.
+ */
+ 'maximum_ring_size'?: (_google_protobuf_UInt64Value | null);
+}
+
+/**
+ * Specific configuration for the :ref:`RingHash`
+ * load balancing policy.
+ */
+export interface _envoy_config_cluster_v3_Cluster_RingHashLbConfig__Output {
+ /**
+ * Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each
+ * provided host) the better the request distribution will reflect the desired weights. Defaults
+ * to 1024 entries, and limited to 8M entries. See also
+ * :ref:`maximum_ring_size`.
+ */
+ 'minimum_ring_size': (_google_protobuf_UInt64Value__Output | null);
+ /**
+ * The hash function used to hash hosts onto the ketama ring. The value defaults to
+ * :ref:`XX_HASH`.
+ */
+ 'hash_function': (_envoy_config_cluster_v3_Cluster_RingHashLbConfig_HashFunction__Output);
+ /**
+ * Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered
+ * to further constrain resource use. See also
+ * :ref:`minimum_ring_size`.
+ */
+ 'maximum_ring_size': (_google_protobuf_UInt64Value__Output | null);
+}
+
+/**
+ * Specific configuration for the RoundRobin load balancing policy.
+ */
+export interface _envoy_config_cluster_v3_Cluster_RoundRobinLbConfig {
+ /**
+ * Configuration for slow start mode.
+ * If this configuration is not set, slow start will not be enabled.
+ */
+ 'slow_start_config'?: (_envoy_config_cluster_v3_Cluster_SlowStartConfig | null);
+}
+
+/**
+ * Specific configuration for the RoundRobin load balancing policy.
+ */
+export interface _envoy_config_cluster_v3_Cluster_RoundRobinLbConfig__Output {
+ /**
+ * Configuration for slow start mode.
+ * If this configuration is not set, slow start will not be enabled.
+ */
+ 'slow_start_config': (_envoy_config_cluster_v3_Cluster_SlowStartConfig__Output | null);
+}
+
+/**
+ * Configuration for :ref:`slow start mode `.
+ */
+export interface _envoy_config_cluster_v3_Cluster_SlowStartConfig {
+ /**
+ * Represents the size of the slow start window.
+ * If set, the newly created host remains in slow start mode starting from its creation time
+ * for the duration of the slow start window.
+ */
+ 'slow_start_window'?: (_google_protobuf_Duration | null);
+ /**
+ * This parameter controls the speed of traffic increase over the slow start window. Defaults to 1.0,
+ * so that the endpoint would get a linearly increasing amount of traffic.
+ * When increasing the value for this parameter, the speed of traffic ramp-up increases non-linearly.
+ * The value of the aggression parameter should be greater than 0.0.
+ * By tuning the parameter, it is possible to achieve a polynomial or exponential ramp-up curve.
+ *
+ * During the slow start window, the effective weight of an endpoint is scaled by the time factor and aggression:
+ * ``new_weight = weight * max(min_weight_percent, time_factor ^ (1 / aggression))``,
+ * where ``time_factor=(time_since_start_seconds / slow_start_time_seconds)``.
+ *
+ * As time progresses, more and more traffic is sent to the endpoint while it is in the slow start window.
+ * Once the host exits slow start, time_factor and aggression no longer affect its weight.
+ */
+ 'aggression'?: (_envoy_config_core_v3_RuntimeDouble | null);
+ /**
+ * Configures the minimum percentage of the original weight, which avoids a new weight so small
+ * that endpoints in slow start mode may receive no traffic during the slow start window.
+ * If not specified, the default is 10%.
+ */
+ 'min_weight_percent'?: (_envoy_type_v3_Percent | null);
+}
+
+/**
+ * Configuration for :ref:`slow start mode `.
+ */
+export interface _envoy_config_cluster_v3_Cluster_SlowStartConfig__Output {
+ /**
+ * Represents the size of the slow start window.
+ * If set, the newly created host remains in slow start mode starting from its creation time
+ * for the duration of the slow start window.
+ */
+ 'slow_start_window': (_google_protobuf_Duration__Output | null);
+ /**
+ * This parameter controls the speed of traffic increase over the slow start window. Defaults to 1.0,
+ * so that the endpoint would get a linearly increasing amount of traffic.
+ * When increasing the value for this parameter, the speed of traffic ramp-up increases non-linearly.
+ * The value of the aggression parameter should be greater than 0.0.
+ * By tuning the parameter, it is possible to achieve a polynomial or exponential ramp-up curve.
+ *
+ * During the slow start window, the effective weight of an endpoint is scaled by the time factor and aggression:
+ * ``new_weight = weight * max(min_weight_percent, time_factor ^ (1 / aggression))``,
+ * where ``time_factor=(time_since_start_seconds / slow_start_time_seconds)``.
+ *
+ * As time progresses, more and more traffic is sent to the endpoint while it is in the slow start window.
+ * Once the host exits slow start, time_factor and aggression no longer affect its weight.
+ */
+ 'aggression': (_envoy_config_core_v3_RuntimeDouble__Output | null);
+ /**
+ * Configures the minimum percentage of the original weight, which avoids a new weight so small
+ * that endpoints in slow start mode may receive no traffic during the slow start window.
+ * If not specified, the default is 10%.
+ */
+ 'min_weight_percent': (_envoy_type_v3_Percent__Output | null);
+}
+
+/**
+ * TransportSocketMatch specifies what transport socket config will be used
+ * when the match conditions are satisfied.
+ */
+export interface _envoy_config_cluster_v3_Cluster_TransportSocketMatch {
+ /**
+ * The name of the match, used in stats generation.
+ */
+ 'name'?: (string);
+ /**
+ * Optional endpoint metadata match criteria.
+ * The connection to the endpoint with metadata matching what is set in this field
+ * will use the transport socket configuration specified here.
+ * The endpoint's metadata entry in ``envoy.transport_socket_match`` is used to match
+ * against the values specified in this field.
+ */
+ 'match'?: (_google_protobuf_Struct | null);
+ /**
+ * The configuration of the transport socket.
+ * [#extension-category: envoy.transport_sockets.upstream]
+ */
+ 'transport_socket'?: (_envoy_config_core_v3_TransportSocket | null);
+}
+
+/**
+ * TransportSocketMatch specifies what transport socket config will be used
+ * when the match conditions are satisfied.
+ */
+export interface _envoy_config_cluster_v3_Cluster_TransportSocketMatch__Output {
+ /**
+ * The name of the match, used in stats generation.
+ */
+ 'name': (string);
+ /**
+ * Optional endpoint metadata match criteria.
+ * The connection to the endpoint with metadata matching what is set in this field
+ * will use the transport socket configuration specified here.
+ * The endpoint's metadata entry in ``envoy.transport_socket_match`` is used to match + * against the values specified in this field. + */ + 'match': (_google_protobuf_Struct__Output | null); + /** + * The configuration of the transport socket. + * [#extension-category: envoy.transport_sockets.upstream] + */ + 'transport_socket': (_envoy_config_core_v3_TransportSocket__Output | null); +} + +/** + * Configuration for :ref:`zone aware routing + * `. + */ +export interface _envoy_config_cluster_v3_Cluster_CommonLbConfig_ZoneAwareLbConfig { + /** + * Configures percentage of requests that will be considered for zone aware routing + * if zone aware routing is configured. If not specified, the default is 100%. + * * :ref:`runtime values `. + * * :ref:`Zone aware routing support `. + */ + 'routing_enabled'?: (_envoy_type_v3_Percent | null); + /** + * Configures minimum upstream cluster size required for zone aware routing + * If upstream cluster size is less than specified, zone aware routing is not performed + * even if zone aware routing is configured. If not specified, the default is 6. + * * :ref:`runtime values `. + * * :ref:`Zone aware routing support `. + */ + 'min_cluster_size'?: (_google_protobuf_UInt64Value | null); + /** + * If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic + * mode`. Instead, the cluster will fail all + * requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a + * failing service. + */ + 'fail_traffic_on_panic'?: (boolean); +} + +/** + * Configuration for :ref:`zone aware routing + * `. + */ +export interface _envoy_config_cluster_v3_Cluster_CommonLbConfig_ZoneAwareLbConfig__Output { + /** + * Configures percentage of requests that will be considered for zone aware routing + * if zone aware routing is configured. If not specified, the default is 100%. + * * :ref:`runtime values `. + * * :ref:`Zone aware routing support `. + */ + 'routing_enabled': (_envoy_type_v3_Percent__Output | null); + /** + * Configures minimum upstream cluster size required for zone aware routing + * If upstream cluster size is less than specified, zone aware routing is not performed + * even if zone aware routing is configured. If not specified, the default is 6. + * * :ref:`runtime values `. + * * :ref:`Zone aware routing support `. + */ + 'min_cluster_size': (_google_protobuf_UInt64Value__Output | null); + /** + * If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic + * mode`. Instead, the cluster will fail all + * requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a + * failing service. + */ + 'fail_traffic_on_panic': (boolean); +} + +/** + * Configuration for a single upstream cluster. + * [#next-free-field: 57] + */ +export interface Cluster { + /** + * Supplies the name of the cluster which must be unique across all clusters. + * The cluster name is used when emitting + * :ref:`statistics ` if :ref:`alt_stat_name + * ` is not provided. + * Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics. + */ + 'name'?: (string); + /** + * The :ref:`service discovery type ` + * to use for resolving the cluster. + */ + 'type'?: (_envoy_config_cluster_v3_Cluster_DiscoveryType); + /** + * Configuration to use for EDS updates for the Cluster. + */ + 'eds_cluster_config'?: (_envoy_config_cluster_v3_Cluster_EdsClusterConfig | null); + /** + * The timeout for new network connections to hosts in the cluster. 
+ * If not set, a default value of 5s will be used. + */ + 'connect_timeout'?: (_google_protobuf_Duration | null); + /** + * Soft limit on size of the cluster’s connections read and write buffers. If + * unspecified, an implementation defined default is applied (1MiB). + */ + 'per_connection_buffer_limit_bytes'?: (_google_protobuf_UInt32Value | null); + /** + * The :ref:`load balancer type ` to use + * when picking a host in the cluster. + */ + 'lb_policy'?: (_envoy_config_cluster_v3_Cluster_LbPolicy); + /** + * Optional :ref:`active health checking ` + * configuration for the cluster. If no + * configuration is specified no health checking will be done and all cluster + * members will be considered healthy at all times. + */ + 'health_checks'?: (_envoy_config_core_v3_HealthCheck)[]; + /** + * Optional maximum requests for a single upstream connection. This parameter + * is respected by both the HTTP/1.1 and HTTP/2 connection pool + * implementations. If not specified, there is no limit. Setting this + * parameter to 1 will effectively disable keep alive. + * + * .. attention:: + * This field has been deprecated in favor of the :ref:`max_requests_per_connection ` field. + * @deprecated + */ + 'max_requests_per_connection'?: (_google_protobuf_UInt32Value | null); + /** + * Optional :ref:`circuit breaking ` for the cluster. + */ + 'circuit_breakers'?: (_envoy_config_cluster_v3_CircuitBreakers | null); + /** + * Additional options when handling HTTP1 requests. + * This has been deprecated in favor of http_protocol_options fields in the + * :ref:`http_protocol_options ` message. + * http_protocol_options can be set via the cluster's + * :ref:`extension_protocol_options`. + * See :ref:`upstream_http_protocol_options + * ` + * for example usage. + * @deprecated + */ + 'http_protocol_options'?: (_envoy_config_core_v3_Http1ProtocolOptions | null); + /** + * Even if default HTTP2 protocol options are desired, this field must be + * set so that Envoy will assume that the upstream supports HTTP/2 when + * making new HTTP connection pool connections. Currently, Envoy only + * supports prior knowledge for upstream connections. Even if TLS is used + * with ALPN, ``http2_protocol_options`` must be specified. As an aside this allows HTTP/2 + * connections to happen over plain text. + * This has been deprecated in favor of http2_protocol_options fields in the + * :ref:`http_protocol_options ` + * message. http2_protocol_options can be set via the cluster's + * :ref:`extension_protocol_options`. + * See :ref:`upstream_http_protocol_options + * ` + * for example usage. + * @deprecated + */ + 'http2_protocol_options'?: (_envoy_config_core_v3_Http2ProtocolOptions | null); + /** + * If the DNS refresh rate is specified and the cluster type is either + * :ref:`STRICT_DNS`, + * or :ref:`LOGICAL_DNS`, + * this value is used as the cluster’s DNS refresh + * rate. The value configured must be at least 1ms. If this setting is not specified, the + * value defaults to 5000ms. For cluster types other than + * :ref:`STRICT_DNS` + * and :ref:`LOGICAL_DNS` + * this setting is ignored. + */ + 'dns_refresh_rate'?: (_google_protobuf_Duration | null); + /** + * The DNS IP address resolution policy. If this setting is not specified, the + * value defaults to + * :ref:`AUTO`. 
+ */ + 'dns_lookup_family'?: (_envoy_config_cluster_v3_Cluster_DnsLookupFamily); + /** + * If DNS resolvers are specified and the cluster type is either + * :ref:`STRICT_DNS`, + * or :ref:`LOGICAL_DNS`, + * this value is used to specify the cluster’s dns resolvers. + * If this setting is not specified, the value defaults to the default + * resolver, which uses /etc/resolv.conf for configuration. For cluster types + * other than + * :ref:`STRICT_DNS` + * and :ref:`LOGICAL_DNS` + * this setting is ignored. + * This field is deprecated in favor of ``dns_resolution_config`` + * which aggregates all of the DNS resolver configuration in a single message. + * @deprecated + */ + 'dns_resolvers'?: (_envoy_config_core_v3_Address)[]; + /** + * If specified, outlier detection will be enabled for this upstream cluster. + * Each of the configuration values can be overridden via + * :ref:`runtime values `. + */ + 'outlier_detection'?: (_envoy_config_cluster_v3_OutlierDetection | null); + /** + * The interval for removing stale hosts from a cluster type + * :ref:`ORIGINAL_DST`. + * Hosts are considered stale if they have not been used + * as upstream destinations during this interval. New hosts are added + * to original destination clusters on demand as new connections are + * redirected to Envoy, causing the number of hosts in the cluster to + * grow over time. Hosts that are not stale (they are actively used as + * destinations) are kept in the cluster, which allows connections to + * them remain open, saving the latency that would otherwise be spent + * on opening new connections. If this setting is not specified, the + * value defaults to 5000ms. For cluster types other than + * :ref:`ORIGINAL_DST` + * this setting is ignored. + */ + 'cleanup_interval'?: (_google_protobuf_Duration | null); + /** + * Optional configuration used to bind newly established upstream connections. + * This overrides any bind_config specified in the bootstrap proto. + * If the address and port are empty, no bind will be performed. + */ + 'upstream_bind_config'?: (_envoy_config_core_v3_BindConfig | null); + /** + * Configuration for load balancing subsetting. + */ + 'lb_subset_config'?: (_envoy_config_cluster_v3_Cluster_LbSubsetConfig | null); + /** + * Optional configuration for the Ring Hash load balancing policy. + */ + 'ring_hash_lb_config'?: (_envoy_config_cluster_v3_Cluster_RingHashLbConfig | null); + /** + * Optional custom transport socket implementation to use for upstream connections. + * To setup TLS, set a transport socket with name ``envoy.transport_sockets.tls`` and + * :ref:`UpstreamTlsContexts ` in the ``typed_config``. + * If no transport socket configuration is specified, new connections + * will be set up with plaintext. + */ + 'transport_socket'?: (_envoy_config_core_v3_TransportSocket | null); + /** + * The Metadata field can be used to provide additional information about the + * cluster. It can be used for stats, logging, and varying filter behavior. + * Fields should use reverse DNS notation to denote which entity within Envoy + * will need the information. For instance, if the metadata is intended for + * the Router filter, the filter name should be specified as ``envoy.filters.http.router``. + */ + 'metadata'?: (_envoy_config_core_v3_Metadata | null); + /** + * Determines how Envoy selects the protocol used to speak to upstream hosts. + * This has been deprecated in favor of setting explicit protocol selection + * in the :ref:`http_protocol_options + * ` message. 
+ * http_protocol_options can be set via the cluster's + * :ref:`extension_protocol_options`. + * @deprecated + */ + 'protocol_selection'?: (_envoy_config_cluster_v3_Cluster_ClusterProtocolSelection); + /** + * Common configuration for all load balancer implementations. + */ + 'common_lb_config'?: (_envoy_config_cluster_v3_Cluster_CommonLbConfig | null); + /** + * An optional alternative to the cluster name to be used for observability. This name is used + * emitting stats for the cluster and access logging the cluster name. This will appear as + * additional information in configuration dumps of a cluster's current status as + * :ref:`observability_name ` + * and as an additional tag "upstream_cluster.name" while tracing. Note: Any ``:`` in the name + * will be converted to ``_`` when emitting statistics. This should not be confused with + * :ref:`Router Filter Header `. + */ + 'alt_stat_name'?: (string); + /** + * Additional options when handling HTTP requests upstream. These options will be applicable to + * both HTTP1 and HTTP2 requests. + * This has been deprecated in favor of + * :ref:`common_http_protocol_options ` + * in the :ref:`http_protocol_options ` message. + * common_http_protocol_options can be set via the cluster's + * :ref:`extension_protocol_options`. + * See :ref:`upstream_http_protocol_options + * ` + * for example usage. + * @deprecated + */ + 'common_http_protocol_options'?: (_envoy_config_core_v3_HttpProtocolOptions | null); + /** + * Optional options for upstream connections. + */ + 'upstream_connection_options'?: (_envoy_config_cluster_v3_UpstreamConnectionOptions | null); + /** + * If an upstream host becomes unhealthy (as determined by the configured health checks + * or outlier detection), immediately close all connections to the failed host. + * + * .. note:: + * + * This is currently only supported for connections created by tcp_proxy. + * + * .. note:: + * + * The current implementation of this feature closes all connections immediately when + * the unhealthy status is detected. If there are a large number of connections open + * to an upstream host that becomes unhealthy, Envoy may spend a substantial amount of + * time exclusively closing these connections, and not processing any other traffic. + */ + 'close_connections_on_host_health_failure'?: (boolean); + /** + * If set to true, Envoy will ignore the health value of a host when processing its removal + * from service discovery. This means that if active health checking is used, Envoy will *not* + * wait for the endpoint to go unhealthy before removing it. + */ + 'ignore_health_on_host_removal'?: (boolean); + /** + * Setting this is required for specifying members of + * :ref:`STATIC`, + * :ref:`STRICT_DNS` + * or :ref:`LOGICAL_DNS` clusters. + * This field supersedes the ``hosts`` field in the v2 API. + * + * .. attention:: + * + * Setting this allows non-EDS cluster types to contain embedded EDS equivalent + * :ref:`endpoint assignments`. + */ + 'load_assignment'?: (_envoy_config_endpoint_v3_ClusterLoadAssignment | null); + /** + * Optional configuration for the Original Destination load balancing policy. + */ + 'original_dst_lb_config'?: (_envoy_config_cluster_v3_Cluster_OriginalDstLbConfig | null); + /** + * The extension_protocol_options field is used to provide extension-specific protocol options + * for upstream connections. The key should match the extension filter name, such as + * "envoy.filters.network.thrift_proxy". See the extension's documentation for details on + * specific options. 
+ * [#next-major-version: make this a list of typed extensions.] + */ + 'typed_extension_protocol_options'?: ({[key: string]: _google_protobuf_Any}); + /** + * Optional configuration for the LeastRequest load balancing policy. + */ + 'least_request_lb_config'?: (_envoy_config_cluster_v3_Cluster_LeastRequestLbConfig | null); + /** + * The custom cluster type. + */ + 'cluster_type'?: (_envoy_config_cluster_v3_Cluster_CustomClusterType | null); + /** + * Optional configuration for setting cluster's DNS refresh rate. If the value is set to true, + * cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS + * resolution. + */ + 'respect_dns_ttl'?: (boolean); + /** + * An (optional) network filter chain, listed in the order the filters should be applied. + * The chain will be applied to all outgoing connections that Envoy makes to the upstream + * servers of this cluster. + */ + 'filters'?: (_envoy_config_cluster_v3_Filter)[]; + /** + * If this field is set and is supported by the client, it will supersede the value of + * :ref:`lb_policy`. + */ + 'load_balancing_policy'?: (_envoy_config_cluster_v3_LoadBalancingPolicy | null); + /** + * [#not-implemented-hide:] + * If present, tells the client where to send load reports via LRS. If not present, the + * client will fall back to a client-side default, which may be either (a) don't send any + * load reports or (b) send load reports for all clusters to a single default server + * (which may be configured in the bootstrap file). + * + * Note that if multiple clusters point to the same LRS server, the client may choose to + * create a separate stream for each cluster or it may choose to coalesce the data for + * multiple clusters onto a single stream. Either way, the client must make sure to send + * the data for any given cluster on no more than one stream. + * + * [#next-major-version: In the v3 API, we should consider restructuring this somehow, + * maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation + * from the LRS stream here.] + */ + 'lrs_server'?: (_envoy_config_core_v3_ConfigSource | null); + /** + * Configuration to use different transport sockets for different endpoints. + * The entry of ``envoy.transport_socket_match`` in the + * :ref:`LbEndpoint.Metadata ` + * is used to match against the transport sockets as they appear in the list. The first + * :ref:`match ` is used. + * For example, with the following match + * + * .. code-block:: yaml + * + * transport_socket_matches: + * - name: "enableMTLS" + * match: + * acceptMTLS: true + * transport_socket: + * name: envoy.transport_sockets.tls + * config: { ... } # tls socket configuration + * - name: "defaultToPlaintext" + * match: {} + * transport_socket: + * name: envoy.transport_sockets.raw_buffer + * + * Connections to the endpoints whose metadata value under ``envoy.transport_socket_match`` + * having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration. + * + * If a :ref:`socket match ` with empty match + * criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext" + * socket match in case above. + * + * If an endpoint metadata's value under ``envoy.transport_socket_match`` does not match any + * ``TransportSocketMatch``, socket configuration fallbacks to use the ``tls_context`` or + * ``transport_socket`` specified in this cluster. + * + * This field allows gradual and flexible transport socket configuration changes. 
+ * + * The metadata of endpoints in EDS can indicate transport socket capabilities. For example, + * an endpoint's metadata can have two key value pairs as "acceptMTLS": "true", + * "acceptPlaintext": "true". While some other endpoints, only accepting plaintext traffic + * has "acceptPlaintext": "true" metadata information. + * + * Then the xDS server can configure the CDS to a client, Envoy A, to send mutual TLS + * traffic for endpoints with "acceptMTLS": "true", by adding a corresponding + * ``TransportSocketMatch`` in this field. Other client Envoys receive CDS without + * ``transport_socket_match`` set, and still send plain text traffic to the same cluster. + * + * This field can be used to specify custom transport socket configurations for health + * checks by adding matching key/value pairs in a health check's + * :ref:`transport socket match criteria ` field. + * + * [#comment:TODO(incfly): add a detailed architecture doc on intended usage.] + */ + 'transport_socket_matches'?: (_envoy_config_cluster_v3_Cluster_TransportSocketMatch)[]; + /** + * If the DNS failure refresh rate is specified and the cluster type is either + * :ref:`STRICT_DNS`, + * or :ref:`LOGICAL_DNS`, + * this is used as the cluster’s DNS refresh rate when requests are failing. If this setting is + * not specified, the failure refresh rate defaults to the DNS refresh rate. For cluster types + * other than :ref:`STRICT_DNS` and + * :ref:`LOGICAL_DNS` this setting is + * ignored. + */ + 'dns_failure_refresh_rate'?: (_envoy_config_cluster_v3_Cluster_RefreshRate | null); + /** + * Always use TCP queries instead of UDP queries for DNS lookups. + * This field is deprecated in favor of ``dns_resolution_config`` + * which aggregates all of the DNS resolver configuration in a single message. + * @deprecated + */ + 'use_tcp_for_dns_lookups'?: (boolean); + /** + * HTTP protocol options that are applied only to upstream HTTP connections. + * These options apply to all HTTP versions. + * This has been deprecated in favor of + * :ref:`upstream_http_protocol_options ` + * in the :ref:`http_protocol_options ` message. + * upstream_http_protocol_options can be set via the cluster's + * :ref:`extension_protocol_options`. + * See :ref:`upstream_http_protocol_options + * ` + * for example usage. + * @deprecated + */ + 'upstream_http_protocol_options'?: (_envoy_config_core_v3_UpstreamHttpProtocolOptions | null); + /** + * If track_timeout_budgets is true, the :ref:`timeout budget histograms + * ` will be published for each + * request. These show what percentage of a request's per try and global timeout was used. A value + * of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value + * of 100 would indicate that the request took the entirety of the timeout given to it. + * + * .. attention:: + * + * This field has been deprecated in favor of ``timeout_budgets``, part of + * :ref:`track_cluster_stats `. + * @deprecated + */ + 'track_timeout_budgets'?: (boolean); + /** + * Optional customization and configuration of upstream connection pool, and upstream type. + * + * Currently this field only applies for HTTP traffic but is designed for eventual use for custom + * TCP upstreams. 
+ * + * For HTTP traffic, Envoy will generally take downstream HTTP and send it upstream as upstream + * HTTP, using the http connection pool and the codec from ``http2_protocol_options`` + * + * For routes where CONNECT termination is configured, Envoy will take downstream CONNECT + * requests and forward the CONNECT payload upstream over raw TCP using the tcp connection pool. + * + * The default pool used is the generic connection pool which creates the HTTP upstream for most + * HTTP requests, and the TCP upstream if CONNECT termination is configured. + * + * If users desire custom connection pool or upstream behavior, for example terminating + * CONNECT only if a custom filter indicates it is appropriate, the custom factories + * can be registered and configured here. + * [#extension-category: envoy.upstreams] + */ + 'upstream_config'?: (_envoy_config_core_v3_TypedExtensionConfig | null); + /** + * Configuration to track optional cluster stats. + */ + 'track_cluster_stats'?: (_envoy_config_cluster_v3_TrackClusterStats | null); + /** + * Preconnect configuration for this cluster. + */ + 'preconnect_policy'?: (_envoy_config_cluster_v3_Cluster_PreconnectPolicy | null); + /** + * If ``connection_pool_per_downstream_connection`` is true, the cluster will use a separate + * connection pool for every downstream connection + */ + 'connection_pool_per_downstream_connection'?: (boolean); + /** + * Optional configuration for the Maglev load balancing policy. + */ + 'maglev_lb_config'?: (_envoy_config_cluster_v3_Cluster_MaglevLbConfig | null); + /** + * DNS resolution configuration which includes the underlying dns resolver addresses and options. + * This field is deprecated in favor of + * :ref:`typed_dns_resolver_config `. + * @deprecated + */ + 'dns_resolution_config'?: (_envoy_config_core_v3_DnsResolutionConfig | null); + /** + * Optional configuration for having cluster readiness block on warm-up. Currently, only applicable for + * :ref:`STRICT_DNS`, + * or :ref:`LOGICAL_DNS`, + * or :ref:`Redis Cluster`. + * If true, cluster readiness blocks on warm-up. If false, the cluster will complete + * initialization whether or not warm-up has completed. Defaults to true. + */ + 'wait_for_warm_on_init'?: (_google_protobuf_BoolValue | null); + /** + * DNS resolver type configuration extension. This extension can be used to configure c-ares, apple, + * or any other DNS resolver types and the related parameters. + * For example, an object of + * :ref:`CaresDnsResolverConfig ` + * can be packed into this ``typed_dns_resolver_config``. This configuration replaces the + * :ref:`dns_resolution_config ` + * configuration. + * During the transition period when both ``dns_resolution_config`` and ``typed_dns_resolver_config`` exists, + * when ``typed_dns_resolver_config`` is in place, Envoy will use it and ignore ``dns_resolution_config``. + * When ``typed_dns_resolver_config`` is missing, the default behavior is in place. + * [#extension-category: envoy.network.dns_resolver] + */ + 'typed_dns_resolver_config'?: (_envoy_config_core_v3_TypedExtensionConfig | null); + /** + * Optional configuration for the RoundRobin load balancing policy. + */ + 'round_robin_lb_config'?: (_envoy_config_cluster_v3_Cluster_RoundRobinLbConfig | null); + 'cluster_discovery_type'?: "type"|"cluster_type"; + /** + * Optional configuration for the load balancing algorithm selected by + * LbPolicy. Currently only + * :ref:`RING_HASH`, + * :ref:`MAGLEV` and + * :ref:`LEAST_REQUEST` + * has additional configuration options. 
+ * Specifying ring_hash_lb_config or maglev_lb_config or least_request_lb_config without setting the corresponding + * LbPolicy will generate an error at runtime. + */ + 'lb_config'?: "ring_hash_lb_config"|"maglev_lb_config"|"original_dst_lb_config"|"least_request_lb_config"|"round_robin_lb_config"; +} + +/** + * Configuration for a single upstream cluster. + * [#next-free-field: 57] + */ +export interface Cluster__Output { + /** + * Supplies the name of the cluster which must be unique across all clusters. + * The cluster name is used when emitting + * :ref:`statistics ` if :ref:`alt_stat_name + * ` is not provided. + * Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics. + */ + 'name': (string); + /** + * The :ref:`service discovery type ` + * to use for resolving the cluster. + */ + 'type'?: (_envoy_config_cluster_v3_Cluster_DiscoveryType__Output); + /** + * Configuration to use for EDS updates for the Cluster. + */ + 'eds_cluster_config': (_envoy_config_cluster_v3_Cluster_EdsClusterConfig__Output | null); + /** + * The timeout for new network connections to hosts in the cluster. + * If not set, a default value of 5s will be used. + */ + 'connect_timeout': (_google_protobuf_Duration__Output | null); + /** + * Soft limit on size of the cluster’s connections read and write buffers. If + * unspecified, an implementation defined default is applied (1MiB). + */ + 'per_connection_buffer_limit_bytes': (_google_protobuf_UInt32Value__Output | null); + /** + * The :ref:`load balancer type ` to use + * when picking a host in the cluster. + */ + 'lb_policy': (_envoy_config_cluster_v3_Cluster_LbPolicy__Output); + /** + * Optional :ref:`active health checking ` + * configuration for the cluster. If no + * configuration is specified no health checking will be done and all cluster + * members will be considered healthy at all times. + */ + 'health_checks': (_envoy_config_core_v3_HealthCheck__Output)[]; + /** + * Optional maximum requests for a single upstream connection. This parameter + * is respected by both the HTTP/1.1 and HTTP/2 connection pool + * implementations. If not specified, there is no limit. Setting this + * parameter to 1 will effectively disable keep alive. + * + * .. attention:: + * This field has been deprecated in favor of the :ref:`max_requests_per_connection ` field. + * @deprecated + */ + 'max_requests_per_connection': (_google_protobuf_UInt32Value__Output | null); + /** + * Optional :ref:`circuit breaking ` for the cluster. + */ + 'circuit_breakers': (_envoy_config_cluster_v3_CircuitBreakers__Output | null); + /** + * Additional options when handling HTTP1 requests. + * This has been deprecated in favor of http_protocol_options fields in the + * :ref:`http_protocol_options ` message. + * http_protocol_options can be set via the cluster's + * :ref:`extension_protocol_options`. + * See :ref:`upstream_http_protocol_options + * ` + * for example usage. + * @deprecated + */ + 'http_protocol_options': (_envoy_config_core_v3_Http1ProtocolOptions__Output | null); + /** + * Even if default HTTP2 protocol options are desired, this field must be + * set so that Envoy will assume that the upstream supports HTTP/2 when + * making new HTTP connection pool connections. Currently, Envoy only + * supports prior knowledge for upstream connections. Even if TLS is used + * with ALPN, ``http2_protocol_options`` must be specified. As an aside this allows HTTP/2 + * connections to happen over plain text. 
+ * This has been deprecated in favor of http2_protocol_options fields in the + * :ref:`http_protocol_options ` + * message. http2_protocol_options can be set via the cluster's + * :ref:`extension_protocol_options`. + * See :ref:`upstream_http_protocol_options + * ` + * for example usage. + * @deprecated + */ + 'http2_protocol_options': (_envoy_config_core_v3_Http2ProtocolOptions__Output | null); + /** + * If the DNS refresh rate is specified and the cluster type is either + * :ref:`STRICT_DNS`, + * or :ref:`LOGICAL_DNS`, + * this value is used as the cluster’s DNS refresh + * rate. The value configured must be at least 1ms. If this setting is not specified, the + * value defaults to 5000ms. For cluster types other than + * :ref:`STRICT_DNS` + * and :ref:`LOGICAL_DNS` + * this setting is ignored. + */ + 'dns_refresh_rate': (_google_protobuf_Duration__Output | null); + /** + * The DNS IP address resolution policy. If this setting is not specified, the + * value defaults to + * :ref:`AUTO`. + */ + 'dns_lookup_family': (_envoy_config_cluster_v3_Cluster_DnsLookupFamily__Output); + /** + * If DNS resolvers are specified and the cluster type is either + * :ref:`STRICT_DNS`, + * or :ref:`LOGICAL_DNS`, + * this value is used to specify the cluster’s dns resolvers. + * If this setting is not specified, the value defaults to the default + * resolver, which uses /etc/resolv.conf for configuration. For cluster types + * other than + * :ref:`STRICT_DNS` + * and :ref:`LOGICAL_DNS` + * this setting is ignored. + * This field is deprecated in favor of ``dns_resolution_config`` + * which aggregates all of the DNS resolver configuration in a single message. + * @deprecated + */ + 'dns_resolvers': (_envoy_config_core_v3_Address__Output)[]; + /** + * If specified, outlier detection will be enabled for this upstream cluster. + * Each of the configuration values can be overridden via + * :ref:`runtime values `. + */ + 'outlier_detection': (_envoy_config_cluster_v3_OutlierDetection__Output | null); + /** + * The interval for removing stale hosts from a cluster type + * :ref:`ORIGINAL_DST`. + * Hosts are considered stale if they have not been used + * as upstream destinations during this interval. New hosts are added + * to original destination clusters on demand as new connections are + * redirected to Envoy, causing the number of hosts in the cluster to + * grow over time. Hosts that are not stale (they are actively used as + * destinations) are kept in the cluster, which allows connections to + * them remain open, saving the latency that would otherwise be spent + * on opening new connections. If this setting is not specified, the + * value defaults to 5000ms. For cluster types other than + * :ref:`ORIGINAL_DST` + * this setting is ignored. + */ + 'cleanup_interval': (_google_protobuf_Duration__Output | null); + /** + * Optional configuration used to bind newly established upstream connections. + * This overrides any bind_config specified in the bootstrap proto. + * If the address and port are empty, no bind will be performed. + */ + 'upstream_bind_config': (_envoy_config_core_v3_BindConfig__Output | null); + /** + * Configuration for load balancing subsetting. + */ + 'lb_subset_config': (_envoy_config_cluster_v3_Cluster_LbSubsetConfig__Output | null); + /** + * Optional configuration for the Ring Hash load balancing policy. 
+ */ + 'ring_hash_lb_config'?: (_envoy_config_cluster_v3_Cluster_RingHashLbConfig__Output | null); + /** + * Optional custom transport socket implementation to use for upstream connections. + * To setup TLS, set a transport socket with name ``envoy.transport_sockets.tls`` and + * :ref:`UpstreamTlsContexts ` in the ``typed_config``. + * If no transport socket configuration is specified, new connections + * will be set up with plaintext. + */ + 'transport_socket': (_envoy_config_core_v3_TransportSocket__Output | null); + /** + * The Metadata field can be used to provide additional information about the + * cluster. It can be used for stats, logging, and varying filter behavior. + * Fields should use reverse DNS notation to denote which entity within Envoy + * will need the information. For instance, if the metadata is intended for + * the Router filter, the filter name should be specified as ``envoy.filters.http.router``. + */ + 'metadata': (_envoy_config_core_v3_Metadata__Output | null); + /** + * Determines how Envoy selects the protocol used to speak to upstream hosts. + * This has been deprecated in favor of setting explicit protocol selection + * in the :ref:`http_protocol_options + * ` message. + * http_protocol_options can be set via the cluster's + * :ref:`extension_protocol_options`. + * @deprecated + */ + 'protocol_selection': (_envoy_config_cluster_v3_Cluster_ClusterProtocolSelection__Output); + /** + * Common configuration for all load balancer implementations. + */ + 'common_lb_config': (_envoy_config_cluster_v3_Cluster_CommonLbConfig__Output | null); + /** + * An optional alternative to the cluster name to be used for observability. This name is used + * emitting stats for the cluster and access logging the cluster name. This will appear as + * additional information in configuration dumps of a cluster's current status as + * :ref:`observability_name ` + * and as an additional tag "upstream_cluster.name" while tracing. Note: Any ``:`` in the name + * will be converted to ``_`` when emitting statistics. This should not be confused with + * :ref:`Router Filter Header `. + */ + 'alt_stat_name': (string); + /** + * Additional options when handling HTTP requests upstream. These options will be applicable to + * both HTTP1 and HTTP2 requests. + * This has been deprecated in favor of + * :ref:`common_http_protocol_options ` + * in the :ref:`http_protocol_options ` message. + * common_http_protocol_options can be set via the cluster's + * :ref:`extension_protocol_options`. + * See :ref:`upstream_http_protocol_options + * ` + * for example usage. + * @deprecated + */ + 'common_http_protocol_options': (_envoy_config_core_v3_HttpProtocolOptions__Output | null); + /** + * Optional options for upstream connections. + */ + 'upstream_connection_options': (_envoy_config_cluster_v3_UpstreamConnectionOptions__Output | null); + /** + * If an upstream host becomes unhealthy (as determined by the configured health checks + * or outlier detection), immediately close all connections to the failed host. + * + * .. note:: + * + * This is currently only supported for connections created by tcp_proxy. + * + * .. note:: + * + * The current implementation of this feature closes all connections immediately when + * the unhealthy status is detected. If there are a large number of connections open + * to an upstream host that becomes unhealthy, Envoy may spend a substantial amount of + * time exclusively closing these connections, and not processing any other traffic. 
+ */ + 'close_connections_on_host_health_failure': (boolean); + /** + * If set to true, Envoy will ignore the health value of a host when processing its removal + * from service discovery. This means that if active health checking is used, Envoy will *not* + * wait for the endpoint to go unhealthy before removing it. + */ + 'ignore_health_on_host_removal': (boolean); + /** + * Setting this is required for specifying members of + * :ref:`STATIC`, + * :ref:`STRICT_DNS` + * or :ref:`LOGICAL_DNS` clusters. + * This field supersedes the ``hosts`` field in the v2 API. + * + * .. attention:: + * + * Setting this allows non-EDS cluster types to contain embedded EDS equivalent + * :ref:`endpoint assignments`. + */ + 'load_assignment': (_envoy_config_endpoint_v3_ClusterLoadAssignment__Output | null); + /** + * Optional configuration for the Original Destination load balancing policy. + */ + 'original_dst_lb_config'?: (_envoy_config_cluster_v3_Cluster_OriginalDstLbConfig__Output | null); + /** + * The extension_protocol_options field is used to provide extension-specific protocol options + * for upstream connections. The key should match the extension filter name, such as + * "envoy.filters.network.thrift_proxy". See the extension's documentation for details on + * specific options. + * [#next-major-version: make this a list of typed extensions.] + */ + 'typed_extension_protocol_options': ({[key: string]: _google_protobuf_Any__Output}); + /** + * Optional configuration for the LeastRequest load balancing policy. + */ + 'least_request_lb_config'?: (_envoy_config_cluster_v3_Cluster_LeastRequestLbConfig__Output | null); + /** + * The custom cluster type. + */ + 'cluster_type'?: (_envoy_config_cluster_v3_Cluster_CustomClusterType__Output | null); + /** + * Optional configuration for setting cluster's DNS refresh rate. If the value is set to true, + * cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS + * resolution. + */ + 'respect_dns_ttl': (boolean); + /** + * An (optional) network filter chain, listed in the order the filters should be applied. + * The chain will be applied to all outgoing connections that Envoy makes to the upstream + * servers of this cluster. + */ + 'filters': (_envoy_config_cluster_v3_Filter__Output)[]; + /** + * If this field is set and is supported by the client, it will supersede the value of + * :ref:`lb_policy`. + */ + 'load_balancing_policy': (_envoy_config_cluster_v3_LoadBalancingPolicy__Output | null); + /** + * [#not-implemented-hide:] + * If present, tells the client where to send load reports via LRS. If not present, the + * client will fall back to a client-side default, which may be either (a) don't send any + * load reports or (b) send load reports for all clusters to a single default server + * (which may be configured in the bootstrap file). + * + * Note that if multiple clusters point to the same LRS server, the client may choose to + * create a separate stream for each cluster or it may choose to coalesce the data for + * multiple clusters onto a single stream. Either way, the client must make sure to send + * the data for any given cluster on no more than one stream. + * + * [#next-major-version: In the v3 API, we should consider restructuring this somehow, + * maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation + * from the LRS stream here.] 
+ */ + 'lrs_server': (_envoy_config_core_v3_ConfigSource__Output | null); + /** + * Configuration to use different transport sockets for different endpoints. + * The entry of ``envoy.transport_socket_match`` in the + * :ref:`LbEndpoint.Metadata ` + * is used to match against the transport sockets as they appear in the list. The first + * :ref:`match ` is used. + * For example, with the following match + * + * .. code-block:: yaml + * + * transport_socket_matches: + * - name: "enableMTLS" + * match: + * acceptMTLS: true + * transport_socket: + * name: envoy.transport_sockets.tls + * config: { ... } # tls socket configuration + * - name: "defaultToPlaintext" + * match: {} + * transport_socket: + * name: envoy.transport_sockets.raw_buffer + * + * Connections to the endpoints whose metadata value under ``envoy.transport_socket_match`` + * having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration. + * + * If a :ref:`socket match ` with empty match + * criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext" + * socket match in case above. + * + * If an endpoint metadata's value under ``envoy.transport_socket_match`` does not match any + * ``TransportSocketMatch``, socket configuration fallbacks to use the ``tls_context`` or + * ``transport_socket`` specified in this cluster. + * + * This field allows gradual and flexible transport socket configuration changes. + * + * The metadata of endpoints in EDS can indicate transport socket capabilities. For example, + * an endpoint's metadata can have two key value pairs as "acceptMTLS": "true", + * "acceptPlaintext": "true". While some other endpoints, only accepting plaintext traffic + * has "acceptPlaintext": "true" metadata information. + * + * Then the xDS server can configure the CDS to a client, Envoy A, to send mutual TLS + * traffic for endpoints with "acceptMTLS": "true", by adding a corresponding + * ``TransportSocketMatch`` in this field. Other client Envoys receive CDS without + * ``transport_socket_match`` set, and still send plain text traffic to the same cluster. + * + * This field can be used to specify custom transport socket configurations for health + * checks by adding matching key/value pairs in a health check's + * :ref:`transport socket match criteria ` field. + * + * [#comment:TODO(incfly): add a detailed architecture doc on intended usage.] + */ + 'transport_socket_matches': (_envoy_config_cluster_v3_Cluster_TransportSocketMatch__Output)[]; + /** + * If the DNS failure refresh rate is specified and the cluster type is either + * :ref:`STRICT_DNS`, + * or :ref:`LOGICAL_DNS`, + * this is used as the cluster’s DNS refresh rate when requests are failing. If this setting is + * not specified, the failure refresh rate defaults to the DNS refresh rate. For cluster types + * other than :ref:`STRICT_DNS` and + * :ref:`LOGICAL_DNS` this setting is + * ignored. + */ + 'dns_failure_refresh_rate': (_envoy_config_cluster_v3_Cluster_RefreshRate__Output | null); + /** + * Always use TCP queries instead of UDP queries for DNS lookups. + * This field is deprecated in favor of ``dns_resolution_config`` + * which aggregates all of the DNS resolver configuration in a single message. + * @deprecated + */ + 'use_tcp_for_dns_lookups': (boolean); + /** + * HTTP protocol options that are applied only to upstream HTTP connections. + * These options apply to all HTTP versions. 
+ * This has been deprecated in favor of + * :ref:`upstream_http_protocol_options ` + * in the :ref:`http_protocol_options ` message. + * upstream_http_protocol_options can be set via the cluster's + * :ref:`extension_protocol_options`. + * See :ref:`upstream_http_protocol_options + * ` + * for example usage. + * @deprecated + */ + 'upstream_http_protocol_options': (_envoy_config_core_v3_UpstreamHttpProtocolOptions__Output | null); + /** + * If track_timeout_budgets is true, the :ref:`timeout budget histograms + * ` will be published for each + * request. These show what percentage of a request's per try and global timeout was used. A value + * of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value + * of 100 would indicate that the request took the entirety of the timeout given to it. + * + * .. attention:: + * + * This field has been deprecated in favor of ``timeout_budgets``, part of + * :ref:`track_cluster_stats `. + * @deprecated + */ + 'track_timeout_budgets': (boolean); + /** + * Optional customization and configuration of upstream connection pool, and upstream type. + * + * Currently this field only applies for HTTP traffic but is designed for eventual use for custom + * TCP upstreams. + * + * For HTTP traffic, Envoy will generally take downstream HTTP and send it upstream as upstream + * HTTP, using the http connection pool and the codec from ``http2_protocol_options`` + * + * For routes where CONNECT termination is configured, Envoy will take downstream CONNECT + * requests and forward the CONNECT payload upstream over raw TCP using the tcp connection pool. + * + * The default pool used is the generic connection pool which creates the HTTP upstream for most + * HTTP requests, and the TCP upstream if CONNECT termination is configured. + * + * If users desire custom connection pool or upstream behavior, for example terminating + * CONNECT only if a custom filter indicates it is appropriate, the custom factories + * can be registered and configured here. + * [#extension-category: envoy.upstreams] + */ + 'upstream_config': (_envoy_config_core_v3_TypedExtensionConfig__Output | null); + /** + * Configuration to track optional cluster stats. + */ + 'track_cluster_stats': (_envoy_config_cluster_v3_TrackClusterStats__Output | null); + /** + * Preconnect configuration for this cluster. + */ + 'preconnect_policy': (_envoy_config_cluster_v3_Cluster_PreconnectPolicy__Output | null); + /** + * If ``connection_pool_per_downstream_connection`` is true, the cluster will use a separate + * connection pool for every downstream connection + */ + 'connection_pool_per_downstream_connection': (boolean); + /** + * Optional configuration for the Maglev load balancing policy. + */ + 'maglev_lb_config'?: (_envoy_config_cluster_v3_Cluster_MaglevLbConfig__Output | null); + /** + * DNS resolution configuration which includes the underlying dns resolver addresses and options. + * This field is deprecated in favor of + * :ref:`typed_dns_resolver_config `. + * @deprecated + */ + 'dns_resolution_config': (_envoy_config_core_v3_DnsResolutionConfig__Output | null); + /** + * Optional configuration for having cluster readiness block on warm-up. Currently, only applicable for + * :ref:`STRICT_DNS`, + * or :ref:`LOGICAL_DNS`, + * or :ref:`Redis Cluster`. + * If true, cluster readiness blocks on warm-up. If false, the cluster will complete + * initialization whether or not warm-up has completed. Defaults to true. 
+ */ + 'wait_for_warm_on_init': (_google_protobuf_BoolValue__Output | null); + /** + * DNS resolver type configuration extension. This extension can be used to configure c-ares, apple, + * or any other DNS resolver types and the related parameters. + * For example, an object of + * :ref:`CaresDnsResolverConfig ` + * can be packed into this ``typed_dns_resolver_config``. This configuration replaces the + * :ref:`dns_resolution_config ` + * configuration. + * During the transition period when both ``dns_resolution_config`` and ``typed_dns_resolver_config`` exists, + * when ``typed_dns_resolver_config`` is in place, Envoy will use it and ignore ``dns_resolution_config``. + * When ``typed_dns_resolver_config`` is missing, the default behavior is in place. + * [#extension-category: envoy.network.dns_resolver] + */ + 'typed_dns_resolver_config': (_envoy_config_core_v3_TypedExtensionConfig__Output | null); + /** + * Optional configuration for the RoundRobin load balancing policy. + */ + 'round_robin_lb_config'?: (_envoy_config_cluster_v3_Cluster_RoundRobinLbConfig__Output | null); + 'cluster_discovery_type': "type"|"cluster_type"; + /** + * Optional configuration for the load balancing algorithm selected by + * LbPolicy. Currently only + * :ref:`RING_HASH`, + * :ref:`MAGLEV` and + * :ref:`LEAST_REQUEST` + * has additional configuration options. + * Specifying ring_hash_lb_config or maglev_lb_config or least_request_lb_config without setting the corresponding + * LbPolicy will generate an error at runtime. + */ + 'lb_config': "ring_hash_lb_config"|"maglev_lb_config"|"original_dst_lb_config"|"least_request_lb_config"|"round_robin_lb_config"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/cluster/v3/ClusterCollection.ts b/packages/grpc-js-xds/src/generated/envoy/config/cluster/v3/ClusterCollection.ts new file mode 100644 index 000000000..a028c8491 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/cluster/v3/ClusterCollection.ts @@ -0,0 +1,19 @@ +// Original file: deps/envoy-api/envoy/config/cluster/v3/cluster.proto + +import type { CollectionEntry as _xds_core_v3_CollectionEntry, CollectionEntry__Output as _xds_core_v3_CollectionEntry__Output } from '../../../../xds/core/v3/CollectionEntry'; + +/** + * Cluster list collections. Entries are ``Cluster`` resources or references. + * [#not-implemented-hide:] + */ +export interface ClusterCollection { + 'entries'?: (_xds_core_v3_CollectionEntry | null); +} + +/** + * Cluster list collections. Entries are ``Cluster`` resources or references. 
+ * [#not-implemented-hide:] + */ +export interface ClusterCollection__Output { + 'entries': (_xds_core_v3_CollectionEntry__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/cluster/Filter.ts b/packages/grpc-js-xds/src/generated/envoy/config/cluster/v3/Filter.ts similarity index 53% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/cluster/Filter.ts rename to packages/grpc-js-xds/src/generated/envoy/config/cluster/v3/Filter.ts index 5608fadf2..cdcfeae89 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/cluster/Filter.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/cluster/v3/Filter.ts @@ -1,29 +1,31 @@ -// Original file: deps/envoy-api/envoy/api/v2/cluster/filter.proto +// Original file: deps/envoy-api/envoy/config/cluster/v3/filter.proto import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; export interface Filter { /** - * The name of the filter to instantiate. The name must match a - * :ref:`supported filter `. + * The name of the filter configuration. */ 'name'?: (string); /** * Filter specific configuration which depends on the filter being * instantiated. See the supported filters for further documentation. + * Note that Envoy's :ref:`downstream network + * filters ` are not valid upstream filters. */ - 'typed_config'?: (_google_protobuf_Any); + 'typed_config'?: (_google_protobuf_Any | null); } export interface Filter__Output { /** - * The name of the filter to instantiate. The name must match a - * :ref:`supported filter `. + * The name of the filter configuration. */ 'name': (string); /** * Filter specific configuration which depends on the filter being * instantiated. See the supported filters for further documentation. + * Note that Envoy's :ref:`downstream network + * filters ` are not valid upstream filters. */ - 'typed_config'?: (_google_protobuf_Any__Output); + 'typed_config': (_google_protobuf_Any__Output | null); } diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/LoadBalancingPolicy.ts b/packages/grpc-js-xds/src/generated/envoy/config/cluster/v3/LoadBalancingPolicy.ts similarity index 70% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/LoadBalancingPolicy.ts rename to packages/grpc-js-xds/src/generated/envoy/config/cluster/v3/LoadBalancingPolicy.ts index f79112a1c..4e4efbe94 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/LoadBalancingPolicy.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/cluster/v3/LoadBalancingPolicy.ts @@ -1,36 +1,23 @@ -// Original file: deps/envoy-api/envoy/api/v2/cluster.proto +// Original file: deps/envoy-api/envoy/config/cluster/v3/cluster.proto -import type { Struct as _google_protobuf_Struct, Struct__Output as _google_protobuf_Struct__Output } from '../../../google/protobuf/Struct'; -import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../google/protobuf/Any'; +import type { TypedExtensionConfig as _envoy_config_core_v3_TypedExtensionConfig, TypedExtensionConfig__Output as _envoy_config_core_v3_TypedExtensionConfig__Output } from '../../../../envoy/config/core/v3/TypedExtensionConfig'; -export interface _envoy_api_v2_LoadBalancingPolicy_Policy { +export interface _envoy_config_cluster_v3_LoadBalancingPolicy_Policy { /** - * Required. The name of the LB policy. + * [#extension-category: envoy.load_balancing_policies] */ - 'name'?: (string); - /** - * Optional config for the LB policy. 
- * No more than one of these two fields may be populated. - */ - 'config'?: (_google_protobuf_Struct); - 'typed_config'?: (_google_protobuf_Any); + 'typed_extension_config'?: (_envoy_config_core_v3_TypedExtensionConfig | null); } -export interface _envoy_api_v2_LoadBalancingPolicy_Policy__Output { - /** - * Required. The name of the LB policy. - */ - 'name': (string); +export interface _envoy_config_cluster_v3_LoadBalancingPolicy_Policy__Output { /** - * Optional config for the LB policy. - * No more than one of these two fields may be populated. + * [#extension-category: envoy.load_balancing_policies] */ - 'config'?: (_google_protobuf_Struct__Output); - 'typed_config'?: (_google_protobuf_Any__Output); + 'typed_extension_config': (_envoy_config_core_v3_TypedExtensionConfig__Output | null); } /** - * [#not-implemented-hide:] Extensible load balancing policy configuration. + * Extensible load balancing policy configuration. * * Every LB policy defined via this mechanism will be identified via a unique name using reverse * DNS notation. If the policy needs configuration parameters, it must define a message for its @@ -56,11 +43,11 @@ export interface LoadBalancingPolicy { * supports. This provides a mechanism for starting to use new LB policies that are not yet * supported by all clients. */ - 'policies'?: (_envoy_api_v2_LoadBalancingPolicy_Policy)[]; + 'policies'?: (_envoy_config_cluster_v3_LoadBalancingPolicy_Policy)[]; } /** - * [#not-implemented-hide:] Extensible load balancing policy configuration. + * Extensible load balancing policy configuration. * * Every LB policy defined via this mechanism will be identified via a unique name using reverse * DNS notation. If the policy needs configuration parameters, it must define a message for its @@ -86,5 +73,5 @@ export interface LoadBalancingPolicy__Output { * supports. This provides a mechanism for starting to use new LB policies that are not yet * supported by all clients. */ - 'policies': (_envoy_api_v2_LoadBalancingPolicy_Policy__Output)[]; + 'policies': (_envoy_config_cluster_v3_LoadBalancingPolicy_Policy__Output)[]; } diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/cluster/OutlierDetection.ts b/packages/grpc-js-xds/src/generated/envoy/config/cluster/v3/OutlierDetection.ts similarity index 63% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/cluster/OutlierDetection.ts rename to packages/grpc-js-xds/src/generated/envoy/config/cluster/v3/OutlierDetection.ts index 53c1b4609..47dfc1877 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/cluster/OutlierDetection.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/cluster/v3/OutlierDetection.ts @@ -1,4 +1,4 @@ -// Original file: deps/envoy-api/envoy/api/v2/cluster/outlier_detection.proto +// Original file: deps/envoy-api/envoy/config/cluster/v3/outlier_detection.proto import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../../google/protobuf/Duration'; @@ -6,51 +6,52 @@ import type { Duration as _google_protobuf_Duration, Duration__Output as _google /** * See the :ref:`architecture overview ` for * more information on outlier detection. 
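As a rough sketch of how the v3 `LoadBalancingPolicy` shape might be populated, the following builds a policy list where each entry wraps a `TypedExtensionConfig`. The extension name and the `Any` payload are placeholders, the assumption that `TypedExtensionConfig` exposes `name` and `typed_config` follows the Envoy proto, and the import path is illustrative.

```ts
import type { LoadBalancingPolicy } from './LoadBalancingPolicy';

// Hypothetical policy list: per the comments above, a client walks `policies`
// in order and uses the first entry whose extension it supports.
const lbPolicy: LoadBalancingPolicy = {
  policies: [
    {
      typed_extension_config: {
        // Placeholder extension name and config payload.
        name: 'example.load_balancing_policies.custom',
        typed_config: {
          type_url: 'type.googleapis.com/example.CustomLbConfig',
          value: Buffer.alloc(0), // serialized policy config bytes would go here
        },
      },
    },
  ],
};
```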
- * [#next-free-field: 21] + * [#next-free-field: 23] */ export interface OutlierDetection { /** - * The number of consecutive 5xx responses or local origin errors that are mapped - * to 5xx error codes before a consecutive 5xx ejection - * occurs. Defaults to 5. + * The number of consecutive server-side error responses (for HTTP traffic, + * 5xx responses; for TCP traffic, connection failures; for Redis, failure to + * respond PONG; etc.) before a consecutive 5xx ejection occurs. Defaults to 5. */ - 'consecutive_5xx'?: (_google_protobuf_UInt32Value); + 'consecutive_5xx'?: (_google_protobuf_UInt32Value | null); /** * The time interval between ejection analysis sweeps. This can result in * both new ejections as well as hosts being returned to service. Defaults * to 10000ms or 10s. */ - 'interval'?: (_google_protobuf_Duration); + 'interval'?: (_google_protobuf_Duration | null); /** * The base time that a host is ejected for. The real time is equal to the - * base time multiplied by the number of times the host has been ejected. + * base time multiplied by the number of times the host has been ejected and is + * capped by :ref:`max_ejection_time`. * Defaults to 30000ms or 30s. */ - 'base_ejection_time'?: (_google_protobuf_Duration); + 'base_ejection_time'?: (_google_protobuf_Duration | null); /** * The maximum % of an upstream cluster that can be ejected due to outlier * detection. Defaults to 10% but will eject at least one host regardless of the value. */ - 'max_ejection_percent'?: (_google_protobuf_UInt32Value); + 'max_ejection_percent'?: (_google_protobuf_UInt32Value | null); /** * The % chance that a host will be actually ejected when an outlier status * is detected through consecutive 5xx. This setting can be used to disable * ejection or to ramp it up slowly. Defaults to 100. */ - 'enforcing_consecutive_5xx'?: (_google_protobuf_UInt32Value); + 'enforcing_consecutive_5xx'?: (_google_protobuf_UInt32Value | null); /** * The % chance that a host will be actually ejected when an outlier status * is detected through success rate statistics. This setting can be used to * disable ejection or to ramp it up slowly. Defaults to 100. */ - 'enforcing_success_rate'?: (_google_protobuf_UInt32Value); + 'enforcing_success_rate'?: (_google_protobuf_UInt32Value | null); /** * The number of hosts in a cluster that must have enough request volume to * detect success rate outliers. If the number of hosts is less than this * setting, outlier detection via success rate statistics is not performed * for any host in the cluster. Defaults to 5. */ - 'success_rate_minimum_hosts'?: (_google_protobuf_UInt32Value); + 'success_rate_minimum_hosts'?: (_google_protobuf_UInt32Value | null); /** * The minimum number of total requests that must be collected in one * interval (as defined by the interval duration above) to include this host @@ -58,7 +59,7 @@ export interface OutlierDetection { * setting, outlier detection via success rate statistics is not performed * for that host. Defaults to 100. */ - 'success_rate_request_volume'?: (_google_protobuf_UInt32Value); + 'success_rate_request_volume'?: (_google_protobuf_UInt32Value | null); /** * This factor is used to determine the ejection threshold for success rate * outlier ejection. The ejection threshold is the difference between the @@ -68,59 +69,59 @@ export interface OutlierDetection { * double. That is, if the desired factor is 1.9, the runtime value should * be 1900. Defaults to 1900. 
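For orientation, here is one way an `OutlierDetection` message could be populated from plain JavaScript objects, assuming the generated well-known types follow the usual proto-loader shapes (`UInt32Value` as `{ value }`, `Duration` as `{ seconds, nanos }`). The values and import path are illustrative only; every field is optional and falls back to the documented default when omitted.

```ts
import type { OutlierDetection } from './OutlierDetection';

const outlierDetection: OutlierDetection = {
  consecutive_5xx: { value: 5 },           // eject after 5 consecutive server-side errors
  interval: { seconds: 10 },               // sweep every 10s
  base_ejection_time: { seconds: 30 },     // first ejection lasts 30s
  max_ejection_time: { seconds: 300 },     // cap repeated ejections at 300s
  max_ejection_percent: { value: 10 },     // never eject more than 10% of the cluster
  enforcing_consecutive_5xx: { value: 100 },
};

// Per the comments above, the ejection duration grows linearly with the ejection
// count and is then capped: e.g. 30s x 11 ejections = 330s, clamped to the 300s cap.
```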
*/ - 'success_rate_stdev_factor'?: (_google_protobuf_UInt32Value); + 'success_rate_stdev_factor'?: (_google_protobuf_UInt32Value | null); /** * The number of consecutive gateway failures (502, 503, 504 status codes) * before a consecutive gateway failure ejection occurs. Defaults to 5. */ - 'consecutive_gateway_failure'?: (_google_protobuf_UInt32Value); + 'consecutive_gateway_failure'?: (_google_protobuf_UInt32Value | null); /** * The % chance that a host will be actually ejected when an outlier status * is detected through consecutive gateway failures. This setting can be * used to disable ejection or to ramp it up slowly. Defaults to 0. */ - 'enforcing_consecutive_gateway_failure'?: (_google_protobuf_UInt32Value); + 'enforcing_consecutive_gateway_failure'?: (_google_protobuf_UInt32Value | null); /** * Determines whether to distinguish local origin failures from external errors. If set to true * the following configuration parameters are taken into account: - * :ref:`consecutive_local_origin_failure`, - * :ref:`enforcing_consecutive_local_origin_failure` + * :ref:`consecutive_local_origin_failure`, + * :ref:`enforcing_consecutive_local_origin_failure` * and - * :ref:`enforcing_local_origin_success_rate`. + * :ref:`enforcing_local_origin_success_rate`. * Defaults to false. */ 'split_external_local_origin_errors'?: (boolean); /** * The number of consecutive locally originated failures before ejection * occurs. Defaults to 5. Parameter takes effect only when - * :ref:`split_external_local_origin_errors` + * :ref:`split_external_local_origin_errors` * is set to true. */ - 'consecutive_local_origin_failure'?: (_google_protobuf_UInt32Value); + 'consecutive_local_origin_failure'?: (_google_protobuf_UInt32Value | null); /** * The % chance that a host will be actually ejected when an outlier status * is detected through consecutive locally originated failures. This setting can be * used to disable ejection or to ramp it up slowly. Defaults to 100. * Parameter takes effect only when - * :ref:`split_external_local_origin_errors` + * :ref:`split_external_local_origin_errors` * is set to true. */ - 'enforcing_consecutive_local_origin_failure'?: (_google_protobuf_UInt32Value); + 'enforcing_consecutive_local_origin_failure'?: (_google_protobuf_UInt32Value | null); /** * The % chance that a host will be actually ejected when an outlier status * is detected through success rate statistics for locally originated errors. * This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. * Parameter takes effect only when - * :ref:`split_external_local_origin_errors` + * :ref:`split_external_local_origin_errors` * is set to true. */ - 'enforcing_local_origin_success_rate'?: (_google_protobuf_UInt32Value); + 'enforcing_local_origin_success_rate'?: (_google_protobuf_UInt32Value | null); /** * The failure percentage to use when determining failure percentage-based outlier detection. If * the failure percentage of a given host is greater than or equal to this value, it will be * ejected. Defaults to 85. */ - 'failure_percentage_threshold'?: (_google_protobuf_UInt32Value); + 'failure_percentage_threshold'?: (_google_protobuf_UInt32Value | null); /** * The % chance that a host will be actually ejected when an outlier status is detected through * failure percentage statistics. 
This setting can be used to disable ejection or to ramp it up @@ -129,76 +130,90 @@ export interface OutlierDetection { * [#next-major-version: setting this without setting failure_percentage_threshold should be * invalid in v4.] */ - 'enforcing_failure_percentage'?: (_google_protobuf_UInt32Value); + 'enforcing_failure_percentage'?: (_google_protobuf_UInt32Value | null); /** * The % chance that a host will be actually ejected when an outlier status is detected through * local-origin failure percentage statistics. This setting can be used to disable ejection or to * ramp it up slowly. Defaults to 0. */ - 'enforcing_failure_percentage_local_origin'?: (_google_protobuf_UInt32Value); + 'enforcing_failure_percentage_local_origin'?: (_google_protobuf_UInt32Value | null); /** * The minimum number of hosts in a cluster in order to perform failure percentage-based ejection. * If the total number of hosts in the cluster is less than this value, failure percentage-based * ejection will not be performed. Defaults to 5. */ - 'failure_percentage_minimum_hosts'?: (_google_protobuf_UInt32Value); + 'failure_percentage_minimum_hosts'?: (_google_protobuf_UInt32Value | null); /** * The minimum number of total requests that must be collected in one interval (as defined by the * interval duration above) to perform failure percentage-based ejection for this host. If the * volume is lower than this setting, failure percentage-based ejection will not be performed for * this host. Defaults to 50. */ - 'failure_percentage_request_volume'?: (_google_protobuf_UInt32Value); + 'failure_percentage_request_volume'?: (_google_protobuf_UInt32Value | null); + /** + * The maximum time that a host is ejected for. See :ref:`base_ejection_time` + * for more information. If not specified, the default value (300000ms or 300s) or + * :ref:`base_ejection_time` value is applied, whatever is larger. + */ + 'max_ejection_time'?: (_google_protobuf_Duration | null); + /** + * The maximum amount of jitter to add to the ejection time, in order to prevent + * a 'thundering herd' effect where all proxies try to reconnect to host at the same time. + * See :ref:`max_ejection_time_jitter` + * Defaults to 0s. + */ + 'max_ejection_time_jitter'?: (_google_protobuf_Duration | null); } /** * See the :ref:`architecture overview ` for * more information on outlier detection. - * [#next-free-field: 21] + * [#next-free-field: 23] */ export interface OutlierDetection__Output { /** - * The number of consecutive 5xx responses or local origin errors that are mapped - * to 5xx error codes before a consecutive 5xx ejection - * occurs. Defaults to 5. + * The number of consecutive server-side error responses (for HTTP traffic, + * 5xx responses; for TCP traffic, connection failures; for Redis, failure to + * respond PONG; etc.) before a consecutive 5xx ejection occurs. Defaults to 5. */ - 'consecutive_5xx'?: (_google_protobuf_UInt32Value__Output); + 'consecutive_5xx': (_google_protobuf_UInt32Value__Output | null); /** * The time interval between ejection analysis sweeps. This can result in * both new ejections as well as hosts being returned to service. Defaults * to 10000ms or 10s. */ - 'interval'?: (_google_protobuf_Duration__Output); + 'interval': (_google_protobuf_Duration__Output | null); /** * The base time that a host is ejected for. The real time is equal to the - * base time multiplied by the number of times the host has been ejected. 
+ * base time multiplied by the number of times the host has been ejected and is + * capped by :ref:`max_ejection_time`. * Defaults to 30000ms or 30s. */ - 'base_ejection_time'?: (_google_protobuf_Duration__Output); + 'base_ejection_time': (_google_protobuf_Duration__Output | null); /** * The maximum % of an upstream cluster that can be ejected due to outlier * detection. Defaults to 10% but will eject at least one host regardless of the value. */ - 'max_ejection_percent'?: (_google_protobuf_UInt32Value__Output); + 'max_ejection_percent': (_google_protobuf_UInt32Value__Output | null); /** * The % chance that a host will be actually ejected when an outlier status * is detected through consecutive 5xx. This setting can be used to disable * ejection or to ramp it up slowly. Defaults to 100. */ - 'enforcing_consecutive_5xx'?: (_google_protobuf_UInt32Value__Output); + 'enforcing_consecutive_5xx': (_google_protobuf_UInt32Value__Output | null); /** * The % chance that a host will be actually ejected when an outlier status * is detected through success rate statistics. This setting can be used to * disable ejection or to ramp it up slowly. Defaults to 100. */ - 'enforcing_success_rate'?: (_google_protobuf_UInt32Value__Output); + 'enforcing_success_rate': (_google_protobuf_UInt32Value__Output | null); /** * The number of hosts in a cluster that must have enough request volume to * detect success rate outliers. If the number of hosts is less than this * setting, outlier detection via success rate statistics is not performed * for any host in the cluster. Defaults to 5. */ - 'success_rate_minimum_hosts'?: (_google_protobuf_UInt32Value__Output); + 'success_rate_minimum_hosts': (_google_protobuf_UInt32Value__Output | null); /** * The minimum number of total requests that must be collected in one * interval (as defined by the interval duration above) to include this host @@ -206,7 +221,7 @@ export interface OutlierDetection__Output { * setting, outlier detection via success rate statistics is not performed * for that host. Defaults to 100. */ - 'success_rate_request_volume'?: (_google_protobuf_UInt32Value__Output); + 'success_rate_request_volume': (_google_protobuf_UInt32Value__Output | null); /** * This factor is used to determine the ejection threshold for success rate * outlier ejection. The ejection threshold is the difference between the @@ -216,59 +231,59 @@ export interface OutlierDetection__Output { * double. That is, if the desired factor is 1.9, the runtime value should * be 1900. Defaults to 1900. */ - 'success_rate_stdev_factor'?: (_google_protobuf_UInt32Value__Output); + 'success_rate_stdev_factor': (_google_protobuf_UInt32Value__Output | null); /** * The number of consecutive gateway failures (502, 503, 504 status codes) * before a consecutive gateway failure ejection occurs. Defaults to 5. */ - 'consecutive_gateway_failure'?: (_google_protobuf_UInt32Value__Output); + 'consecutive_gateway_failure': (_google_protobuf_UInt32Value__Output | null); /** * The % chance that a host will be actually ejected when an outlier status * is detected through consecutive gateway failures. This setting can be * used to disable ejection or to ramp it up slowly. Defaults to 0. */ - 'enforcing_consecutive_gateway_failure'?: (_google_protobuf_UInt32Value__Output); + 'enforcing_consecutive_gateway_failure': (_google_protobuf_UInt32Value__Output | null); /** * Determines whether to distinguish local origin failures from external errors. 
If set to true * the following configuration parameters are taken into account: - * :ref:`consecutive_local_origin_failure`, - * :ref:`enforcing_consecutive_local_origin_failure` + * :ref:`consecutive_local_origin_failure`, + * :ref:`enforcing_consecutive_local_origin_failure` * and - * :ref:`enforcing_local_origin_success_rate`. + * :ref:`enforcing_local_origin_success_rate`. * Defaults to false. */ 'split_external_local_origin_errors': (boolean); /** * The number of consecutive locally originated failures before ejection * occurs. Defaults to 5. Parameter takes effect only when - * :ref:`split_external_local_origin_errors` + * :ref:`split_external_local_origin_errors` * is set to true. */ - 'consecutive_local_origin_failure'?: (_google_protobuf_UInt32Value__Output); + 'consecutive_local_origin_failure': (_google_protobuf_UInt32Value__Output | null); /** * The % chance that a host will be actually ejected when an outlier status * is detected through consecutive locally originated failures. This setting can be * used to disable ejection or to ramp it up slowly. Defaults to 100. * Parameter takes effect only when - * :ref:`split_external_local_origin_errors` + * :ref:`split_external_local_origin_errors` * is set to true. */ - 'enforcing_consecutive_local_origin_failure'?: (_google_protobuf_UInt32Value__Output); + 'enforcing_consecutive_local_origin_failure': (_google_protobuf_UInt32Value__Output | null); /** * The % chance that a host will be actually ejected when an outlier status * is detected through success rate statistics for locally originated errors. * This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100. * Parameter takes effect only when - * :ref:`split_external_local_origin_errors` + * :ref:`split_external_local_origin_errors` * is set to true. */ - 'enforcing_local_origin_success_rate'?: (_google_protobuf_UInt32Value__Output); + 'enforcing_local_origin_success_rate': (_google_protobuf_UInt32Value__Output | null); /** * The failure percentage to use when determining failure percentage-based outlier detection. If * the failure percentage of a given host is greater than or equal to this value, it will be * ejected. Defaults to 85. */ - 'failure_percentage_threshold'?: (_google_protobuf_UInt32Value__Output); + 'failure_percentage_threshold': (_google_protobuf_UInt32Value__Output | null); /** * The % chance that a host will be actually ejected when an outlier status is detected through * failure percentage statistics. This setting can be used to disable ejection or to ramp it up @@ -277,24 +292,37 @@ export interface OutlierDetection__Output { * [#next-major-version: setting this without setting failure_percentage_threshold should be * invalid in v4.] */ - 'enforcing_failure_percentage'?: (_google_protobuf_UInt32Value__Output); + 'enforcing_failure_percentage': (_google_protobuf_UInt32Value__Output | null); /** * The % chance that a host will be actually ejected when an outlier status is detected through * local-origin failure percentage statistics. This setting can be used to disable ejection or to * ramp it up slowly. Defaults to 0. */ - 'enforcing_failure_percentage_local_origin'?: (_google_protobuf_UInt32Value__Output); + 'enforcing_failure_percentage_local_origin': (_google_protobuf_UInt32Value__Output | null); /** * The minimum number of hosts in a cluster in order to perform failure percentage-based ejection. * If the total number of hosts in the cluster is less than this value, failure percentage-based * ejection will not be performed. 
Defaults to 5. */ - 'failure_percentage_minimum_hosts'?: (_google_protobuf_UInt32Value__Output); + 'failure_percentage_minimum_hosts': (_google_protobuf_UInt32Value__Output | null); /** * The minimum number of total requests that must be collected in one interval (as defined by the * interval duration above) to perform failure percentage-based ejection for this host. If the * volume is lower than this setting, failure percentage-based ejection will not be performed for * this host. Defaults to 50. */ - 'failure_percentage_request_volume'?: (_google_protobuf_UInt32Value__Output); + 'failure_percentage_request_volume': (_google_protobuf_UInt32Value__Output | null); + /** + * The maximum time that a host is ejected for. See :ref:`base_ejection_time` + * for more information. If not specified, the default value (300000ms or 300s) or + * :ref:`base_ejection_time` value is applied, whatever is larger. + */ + 'max_ejection_time': (_google_protobuf_Duration__Output | null); + /** + * The maximum amount of jitter to add to the ejection time, in order to prevent + * a 'thundering herd' effect where all proxies try to reconnect to host at the same time. + * See :ref:`max_ejection_time_jitter` + * Defaults to 0s. + */ + 'max_ejection_time_jitter': (_google_protobuf_Duration__Output | null); } diff --git a/packages/grpc-js-xds/src/generated/envoy/config/cluster/v3/TrackClusterStats.ts b/packages/grpc-js-xds/src/generated/envoy/config/cluster/v3/TrackClusterStats.ts new file mode 100644 index 000000000..a65d1fd8d --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/cluster/v3/TrackClusterStats.ts @@ -0,0 +1,36 @@ +// Original file: deps/envoy-api/envoy/config/cluster/v3/cluster.proto + + +export interface TrackClusterStats { + /** + * If timeout_budgets is true, the :ref:`timeout budget histograms + * ` will be published for each + * request. These show what percentage of a request's per try and global timeout was used. A value + * of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value + * of 100 would indicate that the request took the entirety of the timeout given to it. + */ + 'timeout_budgets'?: (boolean); + /** + * If request_response_sizes is true, then the :ref:`histograms + * ` tracking header and body sizes + * of requests and responses will be published. + */ + 'request_response_sizes'?: (boolean); +} + +export interface TrackClusterStats__Output { + /** + * If timeout_budgets is true, the :ref:`timeout budget histograms + * ` will be published for each + * request. These show what percentage of a request's per try and global timeout was used. A value + * of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value + * of 100 would indicate that the request took the entirety of the timeout given to it. + */ + 'timeout_budgets': (boolean); + /** + * If request_response_sizes is true, then the :ref:`histograms + * ` tracking header and body sizes + * of requests and responses will be published. 
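A minimal sketch of the replacement for the deprecated `track_timeout_budgets` cluster field: the same behavior is expressed through the `TrackClusterStats` message defined here, with the option to also publish request/response size histograms. The import path is illustrative.

```ts
import type { TrackClusterStats } from './TrackClusterStats';

// Equivalent of the deprecated `track_timeout_budgets: true`, expressed via the
// newer TrackClusterStats message, plus request/response size histograms.
const trackClusterStats: TrackClusterStats = {
  timeout_budgets: true,
  request_response_sizes: true,
};
```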
+ */ + 'request_response_sizes': (boolean); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/cluster/v3/UpstreamConnectionOptions.ts b/packages/grpc-js-xds/src/generated/envoy/config/cluster/v3/UpstreamConnectionOptions.ts new file mode 100644 index 000000000..cda367641 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/cluster/v3/UpstreamConnectionOptions.ts @@ -0,0 +1,29 @@ +// Original file: deps/envoy-api/envoy/config/cluster/v3/cluster.proto + +import type { TcpKeepalive as _envoy_config_core_v3_TcpKeepalive, TcpKeepalive__Output as _envoy_config_core_v3_TcpKeepalive__Output } from '../../../../envoy/config/core/v3/TcpKeepalive'; + +export interface UpstreamConnectionOptions { + /** + * If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives. + */ + 'tcp_keepalive'?: (_envoy_config_core_v3_TcpKeepalive | null); + /** + * If enabled, associates the interface name of the local address with the upstream connection. + * This can be used by extensions during processing of requests. The association mechanism is + * implementation specific. Defaults to false due to performance concerns. + */ + 'set_local_interface_name_on_upstream_connections'?: (boolean); +} + +export interface UpstreamConnectionOptions__Output { + /** + * If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives. + */ + 'tcp_keepalive': (_envoy_config_core_v3_TcpKeepalive__Output | null); + /** + * If enabled, associates the interface name of the local address with the upstream connection. + * This can be used by extensions during processing of requests. The association mechanism is + * implementation specific. Defaults to false due to performance concerns. + */ + 'set_local_interface_name_on_upstream_connections': (boolean); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/Address.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/Address.ts new file mode 100644 index 000000000..5e29cdbf4 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/Address.ts @@ -0,0 +1,37 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/address.proto + +import type { SocketAddress as _envoy_config_core_v3_SocketAddress, SocketAddress__Output as _envoy_config_core_v3_SocketAddress__Output } from '../../../../envoy/config/core/v3/SocketAddress'; +import type { Pipe as _envoy_config_core_v3_Pipe, Pipe__Output as _envoy_config_core_v3_Pipe__Output } from '../../../../envoy/config/core/v3/Pipe'; +import type { EnvoyInternalAddress as _envoy_config_core_v3_EnvoyInternalAddress, EnvoyInternalAddress__Output as _envoy_config_core_v3_EnvoyInternalAddress__Output } from '../../../../envoy/config/core/v3/EnvoyInternalAddress'; + +/** + * Addresses specify either a logical or physical address and port, which are + * used to tell Envoy where to bind/listen, connect to upstream and find + * management servers. + */ +export interface Address { + 'socket_address'?: (_envoy_config_core_v3_SocketAddress | null); + 'pipe'?: (_envoy_config_core_v3_Pipe | null); + /** + * Specifies a user-space address handled by :ref:`internal listeners + * `. + */ + 'envoy_internal_address'?: (_envoy_config_core_v3_EnvoyInternalAddress | null); + 'address'?: "socket_address"|"pipe"|"envoy_internal_address"; +} + +/** + * Addresses specify either a logical or physical address and port, which are + * used to tell Envoy where to bind/listen, connect to upstream and find + * management servers. 
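A hedged sketch of an `Address` carrying the `socket_address` branch of the oneof. The `SocketAddress` field names (`address`, `port_value`) are not shown in this file and are assumed from the Envoy proto; the import path is illustrative.

```ts
import type { Address } from './Address';

// One branch of the `address` oneof is set; the others are left undefined.
const listenerAddress: Address = {
  socket_address: {
    address: '127.0.0.1', // assumed SocketAddress field names
    port_value: 8080,
  },
};
```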
+ */ +export interface Address__Output { + 'socket_address'?: (_envoy_config_core_v3_SocketAddress__Output | null); + 'pipe'?: (_envoy_config_core_v3_Pipe__Output | null); + /** + * Specifies a user-space address handled by :ref:`internal listeners + * `. + */ + 'envoy_internal_address'?: (_envoy_config_core_v3_EnvoyInternalAddress__Output | null); + 'address': "socket_address"|"pipe"|"envoy_internal_address"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/AggregatedConfigSource.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/AggregatedConfigSource.ts similarity index 57% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/core/AggregatedConfigSource.ts rename to packages/grpc-js-xds/src/generated/envoy/config/core/v3/AggregatedConfigSource.ts index 6837dd0db..428ab4b56 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/AggregatedConfigSource.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/AggregatedConfigSource.ts @@ -1,9 +1,9 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/config_source.proto +// Original file: deps/envoy-api/envoy/config/core/v3/config_source.proto /** * Aggregated Discovery Service (ADS) options. This is currently empty, but when - * set in :ref:`ConfigSource ` can be used to + * set in :ref:`ConfigSource ` can be used to * specify that ADS is to be used. */ export interface AggregatedConfigSource { @@ -11,7 +11,7 @@ export interface AggregatedConfigSource { /** * Aggregated Discovery Service (ADS) options. This is currently empty, but when - * set in :ref:`ConfigSource ` can be used to + * set in :ref:`ConfigSource ` can be used to * specify that ADS is to be used. */ export interface AggregatedConfigSource__Output { diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/AlternateProtocolsCacheOptions.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/AlternateProtocolsCacheOptions.ts new file mode 100644 index 000000000..ed3027a0c --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/AlternateProtocolsCacheOptions.ts @@ -0,0 +1,152 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/protocol.proto + +import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; +import type { TypedExtensionConfig as _envoy_config_core_v3_TypedExtensionConfig, TypedExtensionConfig__Output as _envoy_config_core_v3_TypedExtensionConfig__Output } from '../../../../envoy/config/core/v3/TypedExtensionConfig'; + +/** + * Allows pre-populating the cache with HTTP/3 alternate protocols entries with a 7 day lifetime. + * This will cause Envoy to attempt HTTP/3 to those upstreams, even if the upstreams have not + * advertised HTTP/3 support. These entries will be overwritten by alt-svc + * response headers or cached values. + * As with regular cached entries, if the origin response would result in clearing an existing + * alternate protocol cache entry, pre-populated entries will also be cleared. + * Adding a cache entry with hostname=foo.com port=123 is the equivalent of getting + * response headers + * alt-svc: h3=:"123"; ma=86400" in a response to a request to foo.com:123 + */ +export interface _envoy_config_core_v3_AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry { + /** + * The host name for the alternate protocol entry. + */ + 'hostname'?: (string); + /** + * The port for the alternate protocol entry. 
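Returning to the `AggregatedConfigSource` message above: since it is an empty message, selecting ADS amounts to setting it on a `ConfigSource`. The sketch below assumes the generated `ConfigSource` type exposes an `ads` field for this message, as in the Envoy proto (the `ConfigSource` definition itself appears further down in this diff); the import path is illustrative.

```ts
import type { ConfigSource } from './ConfigSource';

// Selecting ADS: the empty AggregatedConfigSource message is set on the
// (assumed) `ads` branch of ConfigSource.
const adsConfigSource: ConfigSource = {
  ads: {},
};
```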
+ */ + 'port'?: (number); +} + +/** + * Allows pre-populating the cache with HTTP/3 alternate protocols entries with a 7 day lifetime. + * This will cause Envoy to attempt HTTP/3 to those upstreams, even if the upstreams have not + * advertised HTTP/3 support. These entries will be overwritten by alt-svc + * response headers or cached values. + * As with regular cached entries, if the origin response would result in clearing an existing + * alternate protocol cache entry, pre-populated entries will also be cleared. + * Adding a cache entry with hostname=foo.com port=123 is the equivalent of getting + * response headers + * alt-svc: h3=:"123"; ma=86400" in a response to a request to foo.com:123 + */ +export interface _envoy_config_core_v3_AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry__Output { + /** + * The host name for the alternate protocol entry. + */ + 'hostname': (string); + /** + * The port for the alternate protocol entry. + */ + 'port': (number); +} + +/** + * Configures the alternate protocols cache which tracks alternate protocols that can be used to + * make an HTTP connection to an origin server. See https://tools.ietf.org/html/rfc7838 for + * HTTP Alternative Services and https://datatracker.ietf.org/doc/html/draft-ietf-dnsop-svcb-https-04 + * for the "HTTPS" DNS resource record. + * [#next-free-field: 6] + */ +export interface AlternateProtocolsCacheOptions { + /** + * The name of the cache. Multiple named caches allow independent alternate protocols cache + * configurations to operate within a single Envoy process using different configurations. All + * alternate protocols cache options with the same name *must* be equal in all fields when + * referenced from different configuration components. Configuration will fail to load if this is + * not the case. + */ + 'name'?: (string); + /** + * The maximum number of entries that the cache will hold. If not specified defaults to 1024. + * + * .. note: + * + * The implementation is approximate and enforced independently on each worker thread, thus + * it is possible for the maximum entries in the cache to go slightly above the configured + * value depending on timing. This is similar to how other circuit breakers work. + */ + 'max_entries'?: (_google_protobuf_UInt32Value | null); + /** + * Allows configuring a persistent + * :ref:`key value store ` to flush + * alternate protocols entries to disk. + * This function is currently only supported if concurrency is 1 + * Cached entries will take precedence over pre-populated entries below. + */ + 'key_value_store_config'?: (_envoy_config_core_v3_TypedExtensionConfig | null); + /** + * Allows pre-populating the cache with entries, as described above. + */ + 'prepopulated_entries'?: (_envoy_config_core_v3_AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry)[]; + /** + * Optional list of hostnames suffixes for which Alt-Svc entries can be shared. For example, if + * this list contained the value ``.c.example.com``, then an Alt-Svc entry for ``foo.c.example.com`` + * could be shared with ``bar.c.example.com`` but would not be shared with ``baz.example.com``. On + * the other hand, if the list contained the value ``.example.com`` then all three hosts could share + * Alt-Svc entries. Each entry must start with ``.``. If a hostname matches multiple suffixes, the + * first listed suffix will be used. + * + * Since lookup in this list is O(n), it is recommended that the number of suffixes be limited. 
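To make the prepopulation behavior concrete, the sketch below builds an `AlternateProtocolsCacheOptions` whose single prepopulated entry is, per the comments above, the equivalent of receiving `alt-svc: h3=":123"; ma=86400` from `foo.com:123`. The cache name and suffix are placeholders, `UInt32Value` is assumed to be generated as `{ value }`, and the import path is illustrative.

```ts
import type { AlternateProtocolsCacheOptions } from './AlternateProtocolsCacheOptions';

const altProtocolsCache: AlternateProtocolsCacheOptions = {
  name: 'example_alt_protocols_cache',   // placeholder cache name
  max_entries: { value: 1024 },          // matches the documented default
  prepopulated_entries: [
    // Equivalent to an alt-svc: h3=":123"; ma=86400 response from foo.com:123.
    { hostname: 'foo.com', port: 123 },
  ],
  canonical_suffixes: ['.c.example.com'], // Alt-Svc sharing suffix; must start with '.'
};
```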
+ * [#not-implemented-hide:] + */ + 'canonical_suffixes'?: (string)[]; +} + +/** + * Configures the alternate protocols cache which tracks alternate protocols that can be used to + * make an HTTP connection to an origin server. See https://tools.ietf.org/html/rfc7838 for + * HTTP Alternative Services and https://datatracker.ietf.org/doc/html/draft-ietf-dnsop-svcb-https-04 + * for the "HTTPS" DNS resource record. + * [#next-free-field: 6] + */ +export interface AlternateProtocolsCacheOptions__Output { + /** + * The name of the cache. Multiple named caches allow independent alternate protocols cache + * configurations to operate within a single Envoy process using different configurations. All + * alternate protocols cache options with the same name *must* be equal in all fields when + * referenced from different configuration components. Configuration will fail to load if this is + * not the case. + */ + 'name': (string); + /** + * The maximum number of entries that the cache will hold. If not specified defaults to 1024. + * + * .. note: + * + * The implementation is approximate and enforced independently on each worker thread, thus + * it is possible for the maximum entries in the cache to go slightly above the configured + * value depending on timing. This is similar to how other circuit breakers work. + */ + 'max_entries': (_google_protobuf_UInt32Value__Output | null); + /** + * Allows configuring a persistent + * :ref:`key value store ` to flush + * alternate protocols entries to disk. + * This function is currently only supported if concurrency is 1 + * Cached entries will take precedence over pre-populated entries below. + */ + 'key_value_store_config': (_envoy_config_core_v3_TypedExtensionConfig__Output | null); + /** + * Allows pre-populating the cache with entries, as described above. + */ + 'prepopulated_entries': (_envoy_config_core_v3_AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry__Output)[]; + /** + * Optional list of hostnames suffixes for which Alt-Svc entries can be shared. For example, if + * this list contained the value ``.c.example.com``, then an Alt-Svc entry for ``foo.c.example.com`` + * could be shared with ``bar.c.example.com`` but would not be shared with ``baz.example.com``. On + * the other hand, if the list contained the value ``.example.com`` then all three hosts could share + * Alt-Svc entries. Each entry must start with ``.``. If a hostname matches multiple suffixes, the + * first listed suffix will be used. + * + * Since lookup in this list is O(n), it is recommended that the number of suffixes be limited. 
+ * [#not-implemented-hide:] + */ + 'canonical_suffixes': (string)[]; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/ApiConfigSource.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/ApiConfigSource.ts new file mode 100644 index 000000000..0a2f11bdf --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/ApiConfigSource.ts @@ -0,0 +1,220 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/config_source.proto + +import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../../google/protobuf/Duration'; +import type { GrpcService as _envoy_config_core_v3_GrpcService, GrpcService__Output as _envoy_config_core_v3_GrpcService__Output } from '../../../../envoy/config/core/v3/GrpcService'; +import type { RateLimitSettings as _envoy_config_core_v3_RateLimitSettings, RateLimitSettings__Output as _envoy_config_core_v3_RateLimitSettings__Output } from '../../../../envoy/config/core/v3/RateLimitSettings'; +import type { ApiVersion as _envoy_config_core_v3_ApiVersion, ApiVersion__Output as _envoy_config_core_v3_ApiVersion__Output } from '../../../../envoy/config/core/v3/ApiVersion'; +import type { TypedExtensionConfig as _envoy_config_core_v3_TypedExtensionConfig, TypedExtensionConfig__Output as _envoy_config_core_v3_TypedExtensionConfig__Output } from '../../../../envoy/config/core/v3/TypedExtensionConfig'; + +// Original file: deps/envoy-api/envoy/config/core/v3/config_source.proto + +/** + * APIs may be fetched via either REST or gRPC. + */ +export const _envoy_config_core_v3_ApiConfigSource_ApiType = { + /** + * Ideally this would be 'reserved 0' but one can't reserve the default + * value. Instead we throw an exception if this is ever used. + * @deprecated + */ + DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE: 'DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE', + /** + * REST-JSON v2 API. The `canonical JSON encoding + * `_ for + * the v2 protos is used. + */ + REST: 'REST', + /** + * SotW gRPC service. + */ + GRPC: 'GRPC', + /** + * Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} + * rather than Discovery{Request,Response}. Rather than sending Envoy the entire state + * with every update, the xDS server only sends what has changed since the last update. + */ + DELTA_GRPC: 'DELTA_GRPC', + /** + * SotW xDS gRPC with ADS. All resources which resolve to this configuration source will be + * multiplexed on a single connection to an ADS endpoint. + * [#not-implemented-hide:] + */ + AGGREGATED_GRPC: 'AGGREGATED_GRPC', + /** + * Delta xDS gRPC with ADS. All resources which resolve to this configuration source will be + * multiplexed on a single connection to an ADS endpoint. + * [#not-implemented-hide:] + */ + AGGREGATED_DELTA_GRPC: 'AGGREGATED_DELTA_GRPC', +} as const; + +/** + * APIs may be fetched via either REST or gRPC. + */ +export type _envoy_config_core_v3_ApiConfigSource_ApiType = + /** + * Ideally this would be 'reserved 0' but one can't reserve the default + * value. Instead we throw an exception if this is ever used. + */ + | 'DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE' + | 0 + /** + * REST-JSON v2 API. The `canonical JSON encoding + * `_ for + * the v2 protos is used. + */ + | 'REST' + | 1 + /** + * SotW gRPC service. + */ + | 'GRPC' + | 2 + /** + * Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} + * rather than Discovery{Request,Response}. 
Rather than sending Envoy the entire state + * with every update, the xDS server only sends what has changed since the last update. + */ + | 'DELTA_GRPC' + | 3 + /** + * SotW xDS gRPC with ADS. All resources which resolve to this configuration source will be + * multiplexed on a single connection to an ADS endpoint. + * [#not-implemented-hide:] + */ + | 'AGGREGATED_GRPC' + | 5 + /** + * Delta xDS gRPC with ADS. All resources which resolve to this configuration source will be + * multiplexed on a single connection to an ADS endpoint. + * [#not-implemented-hide:] + */ + | 'AGGREGATED_DELTA_GRPC' + | 6 + +/** + * APIs may be fetched via either REST or gRPC. + */ +export type _envoy_config_core_v3_ApiConfigSource_ApiType__Output = typeof _envoy_config_core_v3_ApiConfigSource_ApiType[keyof typeof _envoy_config_core_v3_ApiConfigSource_ApiType] + +/** + * API configuration source. This identifies the API type and cluster that Envoy + * will use to fetch an xDS API. + * [#next-free-field: 10] + */ +export interface ApiConfigSource { + /** + * API type (gRPC, REST, delta gRPC) + */ + 'api_type'?: (_envoy_config_core_v3_ApiConfigSource_ApiType); + /** + * Cluster names should be used only with REST. If > 1 + * cluster is defined, clusters will be cycled through if any kind of failure + * occurs. + * + * .. note:: + * + * The cluster with name ``cluster_name`` must be statically defined and its + * type must not be ``EDS``. + */ + 'cluster_names'?: (string)[]; + /** + * For REST APIs, the delay between successive polls. + */ + 'refresh_delay'?: (_google_protobuf_Duration | null); + /** + * Multiple gRPC services be provided for GRPC. If > 1 cluster is defined, + * services will be cycled through if any kind of failure occurs. + */ + 'grpc_services'?: (_envoy_config_core_v3_GrpcService)[]; + /** + * For REST APIs, the request timeout. If not set, a default value of 1s will be used. + */ + 'request_timeout'?: (_google_protobuf_Duration | null); + /** + * For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be + * rate limited. + */ + 'rate_limit_settings'?: (_envoy_config_core_v3_RateLimitSettings | null); + /** + * Skip the node identifier in subsequent discovery requests for streaming gRPC config types. + */ + 'set_node_on_first_message_only'?: (boolean); + /** + * API version for xDS transport protocol. This describes the xDS gRPC/REST + * endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. + */ + 'transport_api_version'?: (_envoy_config_core_v3_ApiVersion); + /** + * A list of config validators that will be executed when a new update is + * received from the ApiConfigSource. Note that each validator handles a + * specific xDS service type, and only the validators corresponding to the + * type url (in ``:ref: DiscoveryResponse`` or ``:ref: DeltaDiscoveryResponse``) + * will be invoked. + * If the validator returns false or throws an exception, the config will be rejected by + * the client, and a NACK will be sent. + * [#extension-category: envoy.config.validators] + */ + 'config_validators'?: (_envoy_config_core_v3_TypedExtensionConfig)[]; +} + +/** + * API configuration source. This identifies the API type and cluster that Envoy + * will use to fetch an xDS API. + * [#next-free-field: 10] + */ +export interface ApiConfigSource__Output { + /** + * API type (gRPC, REST, delta gRPC) + */ + 'api_type': (_envoy_config_core_v3_ApiConfigSource_ApiType__Output); + /** + * Cluster names should be used only with REST. 
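A sketch of an `ApiConfigSource` using the delta xDS gRPC transport. The string-literal enum values come from the generated `ApiType` and `ApiVersion` types; the `GrpcService` shape (`envoy_grpc.cluster_name`) is not shown in this diff and is assumed from the Envoy proto, and the import path is illustrative.

```ts
import type { ApiConfigSource } from './ApiConfigSource';

const xdsApiConfigSource: ApiConfigSource = {
  api_type: 'DELTA_GRPC',             // incremental updates instead of full state-of-the-world
  transport_api_version: 'V3',
  set_node_on_first_message_only: true,
  grpc_services: [
    // Assumed GrpcService shape: an Envoy-managed gRPC client pointed at a
    // statically defined cluster.
    { envoy_grpc: { cluster_name: 'xds_cluster' } },
  ],
};
```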
If > 1 + * cluster is defined, clusters will be cycled through if any kind of failure + * occurs. + * + * .. note:: + * + * The cluster with name ``cluster_name`` must be statically defined and its + * type must not be ``EDS``. + */ + 'cluster_names': (string)[]; + /** + * For REST APIs, the delay between successive polls. + */ + 'refresh_delay': (_google_protobuf_Duration__Output | null); + /** + * Multiple gRPC services be provided for GRPC. If > 1 cluster is defined, + * services will be cycled through if any kind of failure occurs. + */ + 'grpc_services': (_envoy_config_core_v3_GrpcService__Output)[]; + /** + * For REST APIs, the request timeout. If not set, a default value of 1s will be used. + */ + 'request_timeout': (_google_protobuf_Duration__Output | null); + /** + * For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be + * rate limited. + */ + 'rate_limit_settings': (_envoy_config_core_v3_RateLimitSettings__Output | null); + /** + * Skip the node identifier in subsequent discovery requests for streaming gRPC config types. + */ + 'set_node_on_first_message_only': (boolean); + /** + * API version for xDS transport protocol. This describes the xDS gRPC/REST + * endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. + */ + 'transport_api_version': (_envoy_config_core_v3_ApiVersion__Output); + /** + * A list of config validators that will be executed when a new update is + * received from the ApiConfigSource. Note that each validator handles a + * specific xDS service type, and only the validators corresponding to the + * type url (in ``:ref: DiscoveryResponse`` or ``:ref: DeltaDiscoveryResponse``) + * will be invoked. + * If the validator returns false or throws an exception, the config will be rejected by + * the client, and a NACK will be sent. + * [#extension-category: envoy.config.validators] + */ + 'config_validators': (_envoy_config_core_v3_TypedExtensionConfig__Output)[]; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/ApiVersion.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/ApiVersion.ts new file mode 100644 index 000000000..d3bad5d4e --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/ApiVersion.ts @@ -0,0 +1,53 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/config_source.proto + +/** + * xDS API and non-xDS services version. This is used to describe both resource and transport + * protocol versions (in distinct configuration fields). + */ +export const ApiVersion = { + /** + * When not specified, we assume v2, to ease migration to Envoy's stable API + * versioning. If a client does not support v2 (e.g. due to deprecation), this + * is an invalid value. + * @deprecated + */ + AUTO: 'AUTO', + /** + * Use xDS v2 API. + * @deprecated + */ + V2: 'V2', + /** + * Use xDS v3 API. + */ + V3: 'V3', +} as const; + +/** + * xDS API and non-xDS services version. This is used to describe both resource and transport + * protocol versions (in distinct configuration fields). + */ +export type ApiVersion = + /** + * When not specified, we assume v2, to ease migration to Envoy's stable API + * versioning. If a client does not support v2 (e.g. due to deprecation), this + * is an invalid value. + */ + | 'AUTO' + | 0 + /** + * Use xDS v2 API. + */ + | 'V2' + | 1 + /** + * Use xDS v3 API. + */ + | 'V3' + | 2 + +/** + * xDS API and non-xDS services version. This is used to describe both resource and transport + * protocol versions (in distinct configuration fields). 
+ */ +export type ApiVersion__Output = typeof ApiVersion[keyof typeof ApiVersion] diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/AsyncDataSource.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/AsyncDataSource.ts new file mode 100644 index 000000000..aa152155b --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/AsyncDataSource.ts @@ -0,0 +1,34 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/base.proto + +import type { DataSource as _envoy_config_core_v3_DataSource, DataSource__Output as _envoy_config_core_v3_DataSource__Output } from '../../../../envoy/config/core/v3/DataSource'; +import type { RemoteDataSource as _envoy_config_core_v3_RemoteDataSource, RemoteDataSource__Output as _envoy_config_core_v3_RemoteDataSource__Output } from '../../../../envoy/config/core/v3/RemoteDataSource'; + +/** + * Async data source which support async data fetch. + */ +export interface AsyncDataSource { + /** + * Local async data source. + */ + 'local'?: (_envoy_config_core_v3_DataSource | null); + /** + * Remote async data source. + */ + 'remote'?: (_envoy_config_core_v3_RemoteDataSource | null); + 'specifier'?: "local"|"remote"; +} + +/** + * Async data source which support async data fetch. + */ +export interface AsyncDataSource__Output { + /** + * Local async data source. + */ + 'local'?: (_envoy_config_core_v3_DataSource__Output | null); + /** + * Remote async data source. + */ + 'remote'?: (_envoy_config_core_v3_RemoteDataSource__Output | null); + 'specifier': "local"|"remote"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/BackoffStrategy.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/BackoffStrategy.ts similarity index 58% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/core/BackoffStrategy.ts rename to packages/grpc-js-xds/src/generated/envoy/config/core/v3/BackoffStrategy.ts index 48f9d904d..1049dec0c 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/BackoffStrategy.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/BackoffStrategy.ts @@ -1,4 +1,4 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/backoff.proto +// Original file: deps/envoy-api/envoy/config/core/v3/backoff.proto import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../../google/protobuf/Duration'; @@ -9,17 +9,17 @@ export interface BackoffStrategy { /** * The base interval to be used for the next back off computation. It should * be greater than zero and less than or equal to :ref:`max_interval - * `. + * `. */ - 'base_interval'?: (_google_protobuf_Duration); + 'base_interval'?: (_google_protobuf_Duration | null); /** * Specifies the maximum interval between retries. This parameter is optional, * but must be greater than or equal to the :ref:`base_interval - * ` if set. The default + * ` if set. The default * is 10 times the :ref:`base_interval - * `. + * `. */ - 'max_interval'?: (_google_protobuf_Duration); + 'max_interval'?: (_google_protobuf_Duration | null); } /** @@ -29,15 +29,15 @@ export interface BackoffStrategy__Output { /** * The base interval to be used for the next back off computation. It should * be greater than zero and less than or equal to :ref:`max_interval - * `. + * `. */ - 'base_interval'?: (_google_protobuf_Duration__Output); + 'base_interval': (_google_protobuf_Duration__Output | null); /** * Specifies the maximum interval between retries. 
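A minimal `BackoffStrategy` sketch, assuming `Duration` is generated as `{ seconds, nanos }`; if `max_interval` were omitted it would default to 10 times `base_interval`. The import path is illustrative.

```ts
import type { BackoffStrategy } from './BackoffStrategy';

const retryBackoff: BackoffStrategy = {
  base_interval: { nanos: 500_000_000 }, // 0.5s starting interval
  max_interval: { seconds: 5 },          // must be >= base_interval
};
```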
This parameter is optional, * but must be greater than or equal to the :ref:`base_interval - * ` if set. The default + * ` if set. The default * is 10 times the :ref:`base_interval - * `. + * `. */ - 'max_interval'?: (_google_protobuf_Duration__Output); + 'max_interval': (_google_protobuf_Duration__Output | null); } diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/BindConfig.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/BindConfig.ts new file mode 100644 index 000000000..54543facc --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/BindConfig.ts @@ -0,0 +1,90 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/address.proto + +import type { SocketAddress as _envoy_config_core_v3_SocketAddress, SocketAddress__Output as _envoy_config_core_v3_SocketAddress__Output } from '../../../../envoy/config/core/v3/SocketAddress'; +import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../../google/protobuf/BoolValue'; +import type { SocketOption as _envoy_config_core_v3_SocketOption, SocketOption__Output as _envoy_config_core_v3_SocketOption__Output } from '../../../../envoy/config/core/v3/SocketOption'; +import type { ExtraSourceAddress as _envoy_config_core_v3_ExtraSourceAddress, ExtraSourceAddress__Output as _envoy_config_core_v3_ExtraSourceAddress__Output } from '../../../../envoy/config/core/v3/ExtraSourceAddress'; + +/** + * [#next-free-field: 6] + */ +export interface BindConfig { + /** + * The address to bind to when creating a socket. + */ + 'source_address'?: (_envoy_config_core_v3_SocketAddress | null); + /** + * Whether to set the ``IP_FREEBIND`` option when creating the socket. When this + * flag is set to true, allows the :ref:`source_address + * ` to be an IP address + * that is not configured on the system running Envoy. When this flag is set + * to false, the option ``IP_FREEBIND`` is disabled on the socket. When this + * flag is not set (default), the socket is not modified, i.e. the option is + * neither enabled nor disabled. + */ + 'freebind'?: (_google_protobuf_BoolValue | null); + /** + * Additional socket options that may not be present in Envoy source code or + * precompiled binaries. + */ + 'socket_options'?: (_envoy_config_core_v3_SocketOption)[]; + /** + * Deprecated by + * :ref:`extra_source_addresses ` + * @deprecated + */ + 'additional_source_addresses'?: (_envoy_config_core_v3_SocketAddress)[]; + /** + * Extra source addresses appended to the address specified in the `source_address` + * field. This enables to specify multiple source addresses. Currently, only one extra + * address can be supported, and the extra address should have a different IP version + * with the address in the `source_address` field. The address which has the same IP + * version with the target host's address IP version will be used as bind address. If more + * than one extra address specified, only the first address matched IP version will be + * returned. If there is no same IP version address found, the address in the `source_address` + * will be returned. + */ + 'extra_source_addresses'?: (_envoy_config_core_v3_ExtraSourceAddress)[]; +} + +/** + * [#next-free-field: 6] + */ +export interface BindConfig__Output { + /** + * The address to bind to when creating a socket. + */ + 'source_address': (_envoy_config_core_v3_SocketAddress__Output | null); + /** + * Whether to set the ``IP_FREEBIND`` option when creating the socket. 
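A hedged sketch of a `BindConfig` with a source address and `IP_FREEBIND` enabled. The `SocketAddress` field names are assumed from the Envoy proto, `BoolValue` is assumed to be generated as `{ value }`, and the import path is illustrative.

```ts
import type { BindConfig } from './BindConfig';

const upstreamBindConfig: BindConfig = {
  source_address: {
    address: '10.0.0.5', // assumed SocketAddress field names
    port_value: 0,       // 0 lets the OS pick an ephemeral source port
  },
  freebind: { value: true }, // allow binding to an address not configured on the host
};
```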
When this + * flag is set to true, allows the :ref:`source_address + * ` to be an IP address + * that is not configured on the system running Envoy. When this flag is set + * to false, the option ``IP_FREEBIND`` is disabled on the socket. When this + * flag is not set (default), the socket is not modified, i.e. the option is + * neither enabled nor disabled. + */ + 'freebind': (_google_protobuf_BoolValue__Output | null); + /** + * Additional socket options that may not be present in Envoy source code or + * precompiled binaries. + */ + 'socket_options': (_envoy_config_core_v3_SocketOption__Output)[]; + /** + * Deprecated by + * :ref:`extra_source_addresses ` + * @deprecated + */ + 'additional_source_addresses': (_envoy_config_core_v3_SocketAddress__Output)[]; + /** + * Extra source addresses appended to the address specified in the `source_address` + * field. This enables to specify multiple source addresses. Currently, only one extra + * address can be supported, and the extra address should have a different IP version + * with the address in the `source_address` field. The address which has the same IP + * version with the target host's address IP version will be used as bind address. If more + * than one extra address specified, only the first address matched IP version will be + * returned. If there is no same IP version address found, the address in the `source_address` + * will be returned. + */ + 'extra_source_addresses': (_envoy_config_core_v3_ExtraSourceAddress__Output)[]; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/BuildVersion.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/BuildVersion.ts similarity index 61% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/core/BuildVersion.ts rename to packages/grpc-js-xds/src/generated/envoy/config/core/v3/BuildVersion.ts index 009506fa2..1a86e918e 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/BuildVersion.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/BuildVersion.ts @@ -1,6 +1,6 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/base.proto +// Original file: deps/envoy-api/envoy/config/core/v3/base.proto -import type { SemanticVersion as _envoy_type_SemanticVersion, SemanticVersion__Output as _envoy_type_SemanticVersion__Output } from '../../../../envoy/type/SemanticVersion'; +import type { SemanticVersion as _envoy_type_v3_SemanticVersion, SemanticVersion__Output as _envoy_type_v3_SemanticVersion__Output } from '../../../../envoy/type/v3/SemanticVersion'; import type { Struct as _google_protobuf_Struct, Struct__Output as _google_protobuf_Struct__Output } from '../../../../google/protobuf/Struct'; /** @@ -11,12 +11,12 @@ export interface BuildVersion { /** * SemVer version of extension. */ - 'version'?: (_envoy_type_SemanticVersion); + 'version'?: (_envoy_type_v3_SemanticVersion | null); /** * Free-form build information. - * Envoy defines several well known keys in the source/common/common/version.h file + * Envoy defines several well known keys in the source/common/version/version.h file */ - 'metadata'?: (_google_protobuf_Struct); + 'metadata'?: (_google_protobuf_Struct | null); } /** @@ -27,10 +27,10 @@ export interface BuildVersion__Output { /** * SemVer version of extension. */ - 'version'?: (_envoy_type_SemanticVersion__Output); + 'version': (_envoy_type_v3_SemanticVersion__Output | null); /** * Free-form build information. 
- * Envoy defines several well known keys in the source/common/common/version.h file + * Envoy defines several well known keys in the source/common/version/version.h file */ - 'metadata'?: (_google_protobuf_Struct__Output); + 'metadata': (_google_protobuf_Struct__Output | null); } diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/CidrRange.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/CidrRange.ts similarity index 71% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/core/CidrRange.ts rename to packages/grpc-js-xds/src/generated/envoy/config/core/v3/CidrRange.ts index 00c67734f..4e01fa354 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/CidrRange.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/CidrRange.ts @@ -1,4 +1,4 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/address.proto +// Original file: deps/envoy-api/envoy/config/core/v3/address.proto import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; @@ -12,9 +12,9 @@ export interface CidrRange { */ 'address_prefix'?: (string); /** - * Length of prefix, e.g. 0, 32. + * Length of prefix, e.g. 0, 32. Defaults to 0 when unset. */ - 'prefix_len'?: (_google_protobuf_UInt32Value); + 'prefix_len'?: (_google_protobuf_UInt32Value | null); } /** @@ -27,7 +27,7 @@ export interface CidrRange__Output { */ 'address_prefix': (string); /** - * Length of prefix, e.g. 0, 32. + * Length of prefix, e.g. 0, 32. Defaults to 0 when unset. */ - 'prefix_len'?: (_google_protobuf_UInt32Value__Output); + 'prefix_len': (_google_protobuf_UInt32Value__Output | null); } diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/ConfigSource.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/ConfigSource.ts similarity index 55% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/core/ConfigSource.ts rename to packages/grpc-js-xds/src/generated/envoy/config/core/v3/ConfigSource.ts index 766af6148..1b98848ef 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/ConfigSource.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/ConfigSource.ts @@ -1,46 +1,37 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/config_source.proto +// Original file: deps/envoy-api/envoy/config/core/v3/config_source.proto -import type { ApiConfigSource as _envoy_api_v2_core_ApiConfigSource, ApiConfigSource__Output as _envoy_api_v2_core_ApiConfigSource__Output } from '../../../../envoy/api/v2/core/ApiConfigSource'; -import type { AggregatedConfigSource as _envoy_api_v2_core_AggregatedConfigSource, AggregatedConfigSource__Output as _envoy_api_v2_core_AggregatedConfigSource__Output } from '../../../../envoy/api/v2/core/AggregatedConfigSource'; +import type { ApiConfigSource as _envoy_config_core_v3_ApiConfigSource, ApiConfigSource__Output as _envoy_config_core_v3_ApiConfigSource__Output } from '../../../../envoy/config/core/v3/ApiConfigSource'; +import type { AggregatedConfigSource as _envoy_config_core_v3_AggregatedConfigSource, AggregatedConfigSource__Output as _envoy_config_core_v3_AggregatedConfigSource__Output } from '../../../../envoy/config/core/v3/AggregatedConfigSource'; import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../../google/protobuf/Duration'; -import type { SelfConfigSource as _envoy_api_v2_core_SelfConfigSource, SelfConfigSource__Output as 
_envoy_api_v2_core_SelfConfigSource__Output } from '../../../../envoy/api/v2/core/SelfConfigSource'; -import type { ApiVersion as _envoy_api_v2_core_ApiVersion } from '../../../../envoy/api/v2/core/ApiVersion'; +import type { SelfConfigSource as _envoy_config_core_v3_SelfConfigSource, SelfConfigSource__Output as _envoy_config_core_v3_SelfConfigSource__Output } from '../../../../envoy/config/core/v3/SelfConfigSource'; +import type { ApiVersion as _envoy_config_core_v3_ApiVersion, ApiVersion__Output as _envoy_config_core_v3_ApiVersion__Output } from '../../../../envoy/config/core/v3/ApiVersion'; +import type { Authority as _xds_core_v3_Authority, Authority__Output as _xds_core_v3_Authority__Output } from '../../../../xds/core/v3/Authority'; +import type { PathConfigSource as _envoy_config_core_v3_PathConfigSource, PathConfigSource__Output as _envoy_config_core_v3_PathConfigSource__Output } from '../../../../envoy/config/core/v3/PathConfigSource'; /** * Configuration for :ref:`listeners `, :ref:`clusters * `, :ref:`routes - * `, :ref:`endpoints + * `, :ref:`endpoints * ` etc. may either be sourced from the * filesystem or from an xDS API source. Filesystem configs are watched with * inotify for updates. - * [#next-free-field: 7] + * [#next-free-field: 9] */ export interface ConfigSource { /** - * Path on the filesystem to source and watch for configuration updates. - * When sourcing configuration for :ref:`secret `, - * the certificate and key files are also watched for updates. - * - * .. note:: - * - * The path to the source must exist at config load time. - * - * .. note:: - * - * Envoy will only watch the file path for *moves.* This is because in general only moves - * are atomic. The same method of swapping files as is demonstrated in the - * :ref:`runtime documentation ` can be used here also. + * Deprecated in favor of ``path_config_source``. Use that field instead. + * @deprecated */ 'path'?: (string); /** * API configuration source. */ - 'api_config_source'?: (_envoy_api_v2_core_ApiConfigSource); + 'api_config_source'?: (_envoy_config_core_v3_ApiConfigSource | null); /** * When set, ADS will be used to fetch resources. The ADS API configuration * source in the bootstrap configuration is used. */ - 'ads'?: (_envoy_api_v2_core_AggregatedConfigSource); + 'ads'?: (_envoy_config_core_v3_AggregatedConfigSource | null); /** * When this timeout is specified, Envoy will wait no longer than the specified time for first * config response on this xDS subscription during the :ref:`initialization process @@ -50,12 +41,12 @@ export interface ConfigSource { * means no timeout - Envoy will wait indefinitely for the first xDS config (unless another * timeout applies). The default is 15s. */ - 'initial_fetch_timeout'?: (_google_protobuf_Duration); + 'initial_fetch_timeout'?: (_google_protobuf_Duration | null); /** * [#not-implemented-hide:] * When set, the client will access the resources from the same server it got the * ConfigSource from, although not necessarily from the same stream. This is similar to the - * :ref:`ads` field, except that the client may use a + * :ref:`ads` field, except that the client may use a * different stream to the same server. As a result, this field can be used for things * like LRS that cannot be sent on an ADS stream. It can also be used to link from (e.g.) 
* LDS to RDS on the same server without requiring the management server to know its name @@ -64,51 +55,51 @@ export interface ConfigSource { * this field can implicitly mean to use the same stream in the case where the ConfigSource * is provided via ADS and the specified data can also be obtained via ADS.] */ - 'self'?: (_envoy_api_v2_core_SelfConfigSource); + 'self'?: (_envoy_config_core_v3_SelfConfigSource | null); /** * API version for xDS resources. This implies the type URLs that the client * will request for resources and the resource type that the client will in * turn expect to be delivered. */ - 'resource_api_version'?: (_envoy_api_v2_core_ApiVersion | keyof typeof _envoy_api_v2_core_ApiVersion); - 'config_source_specifier'?: "path"|"api_config_source"|"ads"|"self"; + 'resource_api_version'?: (_envoy_config_core_v3_ApiVersion); + /** + * Authorities that this config source may be used for. An authority specified in a xdstp:// URL + * is resolved to a ``ConfigSource`` prior to configuration fetch. This field provides the + * association between authority name and configuration source. + * [#not-implemented-hide:] + */ + 'authorities'?: (_xds_core_v3_Authority)[]; + /** + * Local filesystem path configuration source. + */ + 'path_config_source'?: (_envoy_config_core_v3_PathConfigSource | null); + 'config_source_specifier'?: "path"|"path_config_source"|"api_config_source"|"ads"|"self"; } /** * Configuration for :ref:`listeners `, :ref:`clusters * `, :ref:`routes - * `, :ref:`endpoints + * `, :ref:`endpoints * ` etc. may either be sourced from the * filesystem or from an xDS API source. Filesystem configs are watched with * inotify for updates. - * [#next-free-field: 7] + * [#next-free-field: 9] */ export interface ConfigSource__Output { /** - * Path on the filesystem to source and watch for configuration updates. - * When sourcing configuration for :ref:`secret `, - * the certificate and key files are also watched for updates. - * - * .. note:: - * - * The path to the source must exist at config load time. - * - * .. note:: - * - * Envoy will only watch the file path for *moves.* This is because in general only moves - * are atomic. The same method of swapping files as is demonstrated in the - * :ref:`runtime documentation ` can be used here also. + * Deprecated in favor of ``path_config_source``. Use that field instead. + * @deprecated */ 'path'?: (string); /** * API configuration source. */ - 'api_config_source'?: (_envoy_api_v2_core_ApiConfigSource__Output); + 'api_config_source'?: (_envoy_config_core_v3_ApiConfigSource__Output | null); /** * When set, ADS will be used to fetch resources. The ADS API configuration * source in the bootstrap configuration is used. */ - 'ads'?: (_envoy_api_v2_core_AggregatedConfigSource__Output); + 'ads'?: (_envoy_config_core_v3_AggregatedConfigSource__Output | null); /** * When this timeout is specified, Envoy will wait no longer than the specified time for first * config response on this xDS subscription during the :ref:`initialization process @@ -118,12 +109,12 @@ export interface ConfigSource__Output { * means no timeout - Envoy will wait indefinitely for the first xDS config (unless another * timeout applies). The default is 15s. */ - 'initial_fetch_timeout'?: (_google_protobuf_Duration__Output); + 'initial_fetch_timeout': (_google_protobuf_Duration__Output | null); /** * [#not-implemented-hide:] * When set, the client will access the resources from the same server it got the * ConfigSource from, although not necessarily from the same stream. 
This is similar to the - * :ref:`ads` field, except that the client may use a + * :ref:`ads` field, except that the client may use a * different stream to the same server. As a result, this field can be used for things * like LRS that cannot be sent on an ADS stream. It can also be used to link from (e.g.) * LDS to RDS on the same server without requiring the management server to know its name @@ -132,12 +123,23 @@ export interface ConfigSource__Output { * this field can implicitly mean to use the same stream in the case where the ConfigSource * is provided via ADS and the specified data can also be obtained via ADS.] */ - 'self'?: (_envoy_api_v2_core_SelfConfigSource__Output); + 'self'?: (_envoy_config_core_v3_SelfConfigSource__Output | null); /** * API version for xDS resources. This implies the type URLs that the client * will request for resources and the resource type that the client will in * turn expect to be delivered. */ - 'resource_api_version': (keyof typeof _envoy_api_v2_core_ApiVersion); - 'config_source_specifier': "path"|"api_config_source"|"ads"|"self"; + 'resource_api_version': (_envoy_config_core_v3_ApiVersion__Output); + /** + * Authorities that this config source may be used for. An authority specified in a xdstp:// URL + * is resolved to a ``ConfigSource`` prior to configuration fetch. This field provides the + * association between authority name and configuration source. + * [#not-implemented-hide:] + */ + 'authorities': (_xds_core_v3_Authority__Output)[]; + /** + * Local filesystem path configuration source. + */ + 'path_config_source'?: (_envoy_config_core_v3_PathConfigSource__Output | null); + 'config_source_specifier': "path"|"path_config_source"|"api_config_source"|"ads"|"self"; } diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/ControlPlane.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/ControlPlane.ts similarity index 91% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/core/ControlPlane.ts rename to packages/grpc-js-xds/src/generated/envoy/config/core/v3/ControlPlane.ts index 551f693a2..dc55f8042 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/ControlPlane.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/ControlPlane.ts @@ -1,4 +1,4 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/base.proto +// Original file: deps/envoy-api/envoy/config/core/v3/base.proto /** diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/DataSource.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/DataSource.ts similarity index 50% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/core/DataSource.ts rename to packages/grpc-js-xds/src/generated/envoy/config/core/v3/DataSource.ts index a04100054..0774fb844 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/DataSource.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/DataSource.ts @@ -1,8 +1,8 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/base.proto +// Original file: deps/envoy-api/envoy/config/core/v3/base.proto /** - * Data source consisting of either a file or an inline value. + * Data source consisting of a file, an inline value, or an environment variable. */ export interface DataSource { /** @@ -17,11 +17,15 @@ export interface DataSource { * String inlined in the configuration. */ 'inline_string'?: (string); - 'specifier'?: "filename"|"inline_bytes"|"inline_string"; + /** + * Environment variable data source. 
+ */ + 'environment_variable'?: (string); + 'specifier'?: "filename"|"inline_bytes"|"inline_string"|"environment_variable"; } /** - * Data source consisting of either a file or an inline value. + * Data source consisting of a file, an inline value, or an environment variable. */ export interface DataSource__Output { /** @@ -36,5 +40,9 @@ export interface DataSource__Output { * String inlined in the configuration. */ 'inline_string'?: (string); - 'specifier': "filename"|"inline_bytes"|"inline_string"; + /** + * Environment variable data source. + */ + 'environment_variable'?: (string); + 'specifier': "filename"|"inline_bytes"|"inline_string"|"environment_variable"; } diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/DnsResolutionConfig.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/DnsResolutionConfig.ts new file mode 100644 index 000000000..cf9f7a455 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/DnsResolutionConfig.ts @@ -0,0 +1,36 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/resolver.proto + +import type { Address as _envoy_config_core_v3_Address, Address__Output as _envoy_config_core_v3_Address__Output } from '../../../../envoy/config/core/v3/Address'; +import type { DnsResolverOptions as _envoy_config_core_v3_DnsResolverOptions, DnsResolverOptions__Output as _envoy_config_core_v3_DnsResolverOptions__Output } from '../../../../envoy/config/core/v3/DnsResolverOptions'; + +/** + * DNS resolution configuration which includes the underlying dns resolver addresses and options. + */ +export interface DnsResolutionConfig { + /** + * A list of dns resolver addresses. If specified, the DNS client library will perform resolution + * via the underlying DNS resolvers. Otherwise, the default system resolvers + * (e.g., /etc/resolv.conf) will be used. + */ + 'resolvers'?: (_envoy_config_core_v3_Address)[]; + /** + * Configuration of DNS resolver option flags which control the behavior of the DNS resolver. + */ + 'dns_resolver_options'?: (_envoy_config_core_v3_DnsResolverOptions | null); +} + +/** + * DNS resolution configuration which includes the underlying dns resolver addresses and options. + */ +export interface DnsResolutionConfig__Output { + /** + * A list of dns resolver addresses. If specified, the DNS client library will perform resolution + * via the underlying DNS resolvers. Otherwise, the default system resolvers + * (e.g., /etc/resolv.conf) will be used. + */ + 'resolvers': (_envoy_config_core_v3_Address__Output)[]; + /** + * Configuration of DNS resolver option flags which control the behavior of the DNS resolver. + */ + 'dns_resolver_options': (_envoy_config_core_v3_DnsResolverOptions__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/DnsResolverOptions.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/DnsResolverOptions.ts new file mode 100644 index 000000000..e3f83b72c --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/DnsResolverOptions.ts @@ -0,0 +1,30 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/resolver.proto + + +/** + * Configuration of DNS resolver option flags which control the behavior of the DNS resolver. + */ +export interface DnsResolverOptions { + /** + * Use TCP for all DNS queries instead of the default protocol UDP. + */ + 'use_tcp_for_dns_lookups'?: (boolean); + /** + * Do not use the default search domains; only query hostnames as-is or as aliases. 
+ */ + 'no_default_search_domain'?: (boolean); +} + +/** + * Configuration of DNS resolver option flags which control the behavior of the DNS resolver. + */ +export interface DnsResolverOptions__Output { + /** + * Use TCP for all DNS queries instead of the default protocol UDP. + */ + 'use_tcp_for_dns_lookups': (boolean); + /** + * Do not use the default search domains; only query hostnames as-is or as aliases. + */ + 'no_default_search_domain': (boolean); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/EnvoyInternalAddress.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/EnvoyInternalAddress.ts new file mode 100644 index 000000000..264e65a0a --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/EnvoyInternalAddress.ts @@ -0,0 +1,40 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/address.proto + + +/** + * The address represents an envoy internal listener. + * [#comment: TODO(asraa): When address available, remove workaround from test/server/server_fuzz_test.cc:30.] + */ +export interface EnvoyInternalAddress { + /** + * Specifies the :ref:`name ` of the + * internal listener. + */ + 'server_listener_name'?: (string); + /** + * Specifies an endpoint identifier to distinguish between multiple endpoints for the same internal listener in a + * single upstream pool. Only used in the upstream addresses for tracking changes to individual endpoints. This, for + * example, may be set to the final destination IP for the target internal listener. + */ + 'endpoint_id'?: (string); + 'address_name_specifier'?: "server_listener_name"; +} + +/** + * The address represents an envoy internal listener. + * [#comment: TODO(asraa): When address available, remove workaround from test/server/server_fuzz_test.cc:30.] + */ +export interface EnvoyInternalAddress__Output { + /** + * Specifies the :ref:`name ` of the + * internal listener. + */ + 'server_listener_name'?: (string); + /** + * Specifies an endpoint identifier to distinguish between multiple endpoints for the same internal listener in a + * single upstream pool. Only used in the upstream addresses for tracking changes to individual endpoints. This, for + * example, may be set to the final destination IP for the target internal listener. 
+ */ + 'endpoint_id': (string); + 'address_name_specifier': "server_listener_name"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/EventServiceConfig.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/EventServiceConfig.ts similarity index 57% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/core/EventServiceConfig.ts rename to packages/grpc-js-xds/src/generated/envoy/config/core/v3/EventServiceConfig.ts index d0637578c..6226b97c8 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/EventServiceConfig.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/EventServiceConfig.ts @@ -1,6 +1,6 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/event_service_config.proto +// Original file: deps/envoy-api/envoy/config/core/v3/event_service_config.proto -import type { GrpcService as _envoy_api_v2_core_GrpcService, GrpcService__Output as _envoy_api_v2_core_GrpcService__Output } from '../../../../envoy/api/v2/core/GrpcService'; +import type { GrpcService as _envoy_config_core_v3_GrpcService, GrpcService__Output as _envoy_config_core_v3_GrpcService__Output } from '../../../../envoy/config/core/v3/GrpcService'; /** * [#not-implemented-hide:] @@ -10,7 +10,7 @@ export interface EventServiceConfig { /** * Specifies the gRPC service that hosts the event reporting service. */ - 'grpc_service'?: (_envoy_api_v2_core_GrpcService); + 'grpc_service'?: (_envoy_config_core_v3_GrpcService | null); 'config_source_specifier'?: "grpc_service"; } @@ -22,6 +22,6 @@ export interface EventServiceConfig__Output { /** * Specifies the gRPC service that hosts the event reporting service. */ - 'grpc_service'?: (_envoy_api_v2_core_GrpcService__Output); + 'grpc_service'?: (_envoy_config_core_v3_GrpcService__Output | null); 'config_source_specifier': "grpc_service"; } diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/Extension.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/Extension.ts similarity index 79% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/core/Extension.ts rename to packages/grpc-js-xds/src/generated/envoy/config/core/v3/Extension.ts index 418c7a360..dbbdb0cee 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/Extension.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/Extension.ts @@ -1,10 +1,10 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/base.proto +// Original file: deps/envoy-api/envoy/config/core/v3/base.proto -import type { BuildVersion as _envoy_api_v2_core_BuildVersion, BuildVersion__Output as _envoy_api_v2_core_BuildVersion__Output } from '../../../../envoy/api/v2/core/BuildVersion'; +import type { BuildVersion as _envoy_config_core_v3_BuildVersion, BuildVersion__Output as _envoy_config_core_v3_BuildVersion__Output } from '../../../../envoy/config/core/v3/BuildVersion'; /** * Version and identification for an Envoy extension. - * [#next-free-field: 6] + * [#next-free-field: 7] */ export interface Extension { /** @@ -24,6 +24,7 @@ export interface Extension { * [#not-implemented-hide:] Type descriptor of extension configuration proto. * [#comment:TODO(yanavlasov): Link to the doc with existing configuration protos.] * [#comment:TODO(yanavlasov): Add tests when PR #9391 lands.] + * @deprecated */ 'type_descriptor'?: (string); /** @@ -31,16 +32,20 @@ export interface Extension { * of other extensions and the Envoy API. * This field is not set when extension did not provide version information. 
*/ - 'version'?: (_envoy_api_v2_core_BuildVersion); + 'version'?: (_envoy_config_core_v3_BuildVersion | null); /** * Indicates that the extension is present but was disabled via dynamic configuration. */ 'disabled'?: (boolean); + /** + * Type URLs of extension configuration protos. + */ + 'type_urls'?: (string)[]; } /** * Version and identification for an Envoy extension. - * [#next-free-field: 6] + * [#next-free-field: 7] */ export interface Extension__Output { /** @@ -60,6 +65,7 @@ export interface Extension__Output { * [#not-implemented-hide:] Type descriptor of extension configuration proto. * [#comment:TODO(yanavlasov): Link to the doc with existing configuration protos.] * [#comment:TODO(yanavlasov): Add tests when PR #9391 lands.] + * @deprecated */ 'type_descriptor': (string); /** @@ -67,9 +73,13 @@ export interface Extension__Output { * of other extensions and the Envoy API. * This field is not set when extension did not provide version information. */ - 'version'?: (_envoy_api_v2_core_BuildVersion__Output); + 'version': (_envoy_config_core_v3_BuildVersion__Output | null); /** * Indicates that the extension is present but was disabled via dynamic configuration. */ 'disabled': (boolean); + /** + * Type URLs of extension configuration protos. + */ + 'type_urls': (string)[]; } diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/ExtensionConfigSource.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/ExtensionConfigSource.ts new file mode 100644 index 000000000..6342f50fc --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/ExtensionConfigSource.ts @@ -0,0 +1,72 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/config_source.proto + +import type { ConfigSource as _envoy_config_core_v3_ConfigSource, ConfigSource__Output as _envoy_config_core_v3_ConfigSource__Output } from '../../../../envoy/config/core/v3/ConfigSource'; +import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; + +/** + * Configuration source specifier for a late-bound extension configuration. The + * parent resource is warmed until all the initial extension configurations are + * received, unless the flag to apply the default configuration is set. + * Subsequent extension updates are atomic on a per-worker basis. Once an + * extension configuration is applied to a request or a connection, it remains + * constant for the duration of processing. If the initial delivery of the + * extension configuration fails, due to a timeout for example, the optional + * default configuration is applied. Without a default configuration, the + * extension is disabled, until an extension configuration is received. The + * behavior of a disabled extension depends on the context. For example, a + * filter chain with a disabled extension filter rejects all incoming streams. + */ +export interface ExtensionConfigSource { + 'config_source'?: (_envoy_config_core_v3_ConfigSource | null); + /** + * Optional default configuration to use as the initial configuration if + * there is a failure to receive the initial extension configuration or if + * ``apply_default_config_without_warming`` flag is set. + */ + 'default_config'?: (_google_protobuf_Any | null); + /** + * Use the default config as the initial configuration without warming and + * waiting for the first discovery response. Requires the default configuration + * to be supplied. 
+ */ + 'apply_default_config_without_warming'?: (boolean); + /** + * A set of permitted extension type URLs. Extension configuration updates are rejected + * if they do not match any type URL in the set. + */ + 'type_urls'?: (string)[]; +} + +/** + * Configuration source specifier for a late-bound extension configuration. The + * parent resource is warmed until all the initial extension configurations are + * received, unless the flag to apply the default configuration is set. + * Subsequent extension updates are atomic on a per-worker basis. Once an + * extension configuration is applied to a request or a connection, it remains + * constant for the duration of processing. If the initial delivery of the + * extension configuration fails, due to a timeout for example, the optional + * default configuration is applied. Without a default configuration, the + * extension is disabled, until an extension configuration is received. The + * behavior of a disabled extension depends on the context. For example, a + * filter chain with a disabled extension filter rejects all incoming streams. + */ +export interface ExtensionConfigSource__Output { + 'config_source': (_envoy_config_core_v3_ConfigSource__Output | null); + /** + * Optional default configuration to use as the initial configuration if + * there is a failure to receive the initial extension configuration or if + * ``apply_default_config_without_warming`` flag is set. + */ + 'default_config': (_google_protobuf_Any__Output | null); + /** + * Use the default config as the initial configuration without warming and + * waiting for the first discovery response. Requires the default configuration + * to be supplied. + */ + 'apply_default_config_without_warming': (boolean); + /** + * A set of permitted extension type URLs. Extension configuration updates are rejected + * if they do not match any type URL in the set. + */ + 'type_urls': (string)[]; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/ExtraSourceAddress.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/ExtraSourceAddress.ts new file mode 100644 index 000000000..78051fafb --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/ExtraSourceAddress.ts @@ -0,0 +1,38 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/address.proto + +import type { SocketAddress as _envoy_config_core_v3_SocketAddress, SocketAddress__Output as _envoy_config_core_v3_SocketAddress__Output } from '../../../../envoy/config/core/v3/SocketAddress'; +import type { SocketOptionsOverride as _envoy_config_core_v3_SocketOptionsOverride, SocketOptionsOverride__Output as _envoy_config_core_v3_SocketOptionsOverride__Output } from '../../../../envoy/config/core/v3/SocketOptionsOverride'; + +export interface ExtraSourceAddress { + /** + * The additional address to bind. + */ + 'address'?: (_envoy_config_core_v3_SocketAddress | null); + /** + * Additional socket options that may not be present in Envoy source code or + * precompiled binaries. If specified, this will override the + * :ref:`socket_options ` + * in the BindConfig. If specified with no + * :ref:`socket_options ` + * or an empty list of :ref:`socket_options `, + * it means no socket option will apply. + */ + 'socket_options'?: (_envoy_config_core_v3_SocketOptionsOverride | null); +} + +export interface ExtraSourceAddress__Output { + /** + * The additional address to bind. 
+ */ + 'address': (_envoy_config_core_v3_SocketAddress__Output | null); + /** + * Additional socket options that may not be present in Envoy source code or + * precompiled binaries. If specified, this will override the + * :ref:`socket_options ` + * in the BindConfig. If specified with no + * :ref:`socket_options ` + * or an empty list of :ref:`socket_options `, + * it means no socket option will apply. + */ + 'socket_options': (_envoy_config_core_v3_SocketOptionsOverride__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/GrpcProtocolOptions.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/GrpcProtocolOptions.ts new file mode 100644 index 000000000..1f0376a1c --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/GrpcProtocolOptions.ts @@ -0,0 +1,17 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/protocol.proto + +import type { Http2ProtocolOptions as _envoy_config_core_v3_Http2ProtocolOptions, Http2ProtocolOptions__Output as _envoy_config_core_v3_Http2ProtocolOptions__Output } from '../../../../envoy/config/core/v3/Http2ProtocolOptions'; + +/** + * [#not-implemented-hide:] + */ +export interface GrpcProtocolOptions { + 'http2_protocol_options'?: (_envoy_config_core_v3_Http2ProtocolOptions | null); +} + +/** + * [#not-implemented-hide:] + */ +export interface GrpcProtocolOptions__Output { + 'http2_protocol_options': (_envoy_config_core_v3_Http2ProtocolOptions__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/GrpcService.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/GrpcService.ts similarity index 53% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/core/GrpcService.ts rename to packages/grpc-js-xds/src/generated/envoy/config/core/v3/GrpcService.ts index 3bc894b57..eaeeff52c 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/GrpcService.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/GrpcService.ts @@ -1,9 +1,11 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/grpc_service.proto +// Original file: deps/envoy-api/envoy/config/core/v3/grpc_service.proto import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../../google/protobuf/Duration'; -import type { HeaderValue as _envoy_api_v2_core_HeaderValue, HeaderValue__Output as _envoy_api_v2_core_HeaderValue__Output } from '../../../../envoy/api/v2/core/HeaderValue'; +import type { HeaderValue as _envoy_config_core_v3_HeaderValue, HeaderValue__Output as _envoy_config_core_v3_HeaderValue__Output } from '../../../../envoy/config/core/v3/HeaderValue'; +import type { RetryPolicy as _envoy_config_core_v3_RetryPolicy, RetryPolicy__Output as _envoy_config_core_v3_RetryPolicy__Output } from '../../../../envoy/config/core/v3/RetryPolicy'; import type { Struct as _google_protobuf_Struct, Struct__Output as _google_protobuf_Struct__Output } from '../../../../google/protobuf/Struct'; -import type { DataSource as _envoy_api_v2_core_DataSource, DataSource__Output as _envoy_api_v2_core_DataSource__Output } from '../../../../envoy/api/v2/core/DataSource'; +import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; +import type { DataSource as _envoy_config_core_v3_DataSource, DataSource__Output as _envoy_config_core_v3_DataSource__Output } from '../../../../envoy/config/core/v3/DataSource'; import type { Empty as 
_google_protobuf_Empty, Empty__Output as _google_protobuf_Empty__Output } from '../../../../google/protobuf/Empty'; import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; import type { Long } from '@grpc/proto-loader'; @@ -11,7 +13,7 @@ import type { Long } from '@grpc/proto-loader'; /** * [#next-free-field: 8] */ -export interface _envoy_api_v2_core_GrpcService_GoogleGrpc_CallCredentials { +export interface _envoy_config_core_v3_GrpcService_GoogleGrpc_CallCredentials { /** * Access token credentials. * https://grpc.io/grpc/cpp/namespacegrpc.html#ad3a80da696ffdaea943f0f858d7a360d. @@ -21,7 +23,7 @@ export interface _envoy_api_v2_core_GrpcService_GoogleGrpc_CallCredentials { * Google Compute Engine credentials. * https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 */ - 'google_compute_engine'?: (_google_protobuf_Empty); + 'google_compute_engine'?: (_google_protobuf_Empty | null); /** * Google refresh token credentials. * https://grpc.io/grpc/cpp/namespacegrpc.html#a96901c997b91bc6513b08491e0dca37c. @@ -31,31 +33,31 @@ export interface _envoy_api_v2_core_GrpcService_GoogleGrpc_CallCredentials { * Service Account JWT Access credentials. * https://grpc.io/grpc/cpp/namespacegrpc.html#a92a9f959d6102461f66ee973d8e9d3aa. */ - 'service_account_jwt_access'?: (_envoy_api_v2_core_GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials); + 'service_account_jwt_access'?: (_envoy_config_core_v3_GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials | null); /** * Google IAM credentials. * https://grpc.io/grpc/cpp/namespacegrpc.html#a9fc1fc101b41e680d47028166e76f9d0. */ - 'google_iam'?: (_envoy_api_v2_core_GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials); + 'google_iam'?: (_envoy_config_core_v3_GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials | null); /** * Custom authenticator credentials. * https://grpc.io/grpc/cpp/namespacegrpc.html#a823c6a4b19ffc71fb33e90154ee2ad07. * https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other-authentication-mechanisms. */ - 'from_plugin'?: (_envoy_api_v2_core_GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin); + 'from_plugin'?: (_envoy_config_core_v3_GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin | null); /** * Custom security token service which implements OAuth 2.0 token exchange. * https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 * See https://github.com/grpc/grpc/pull/19587. */ - 'sts_service'?: (_envoy_api_v2_core_GrpcService_GoogleGrpc_CallCredentials_StsService); + 'sts_service'?: (_envoy_config_core_v3_GrpcService_GoogleGrpc_CallCredentials_StsService | null); 'credential_specifier'?: "access_token"|"google_compute_engine"|"google_refresh_token"|"service_account_jwt_access"|"google_iam"|"from_plugin"|"sts_service"; } /** * [#next-free-field: 8] */ -export interface _envoy_api_v2_core_GrpcService_GoogleGrpc_CallCredentials__Output { +export interface _envoy_config_core_v3_GrpcService_GoogleGrpc_CallCredentials__Output { /** * Access token credentials. * https://grpc.io/grpc/cpp/namespacegrpc.html#ad3a80da696ffdaea943f0f858d7a360d. @@ -65,7 +67,7 @@ export interface _envoy_api_v2_core_GrpcService_GoogleGrpc_CallCredentials__Outp * Google Compute Engine credentials. 
* https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 */ - 'google_compute_engine'?: (_google_protobuf_Empty__Output); + 'google_compute_engine'?: (_google_protobuf_Empty__Output | null); /** * Google refresh token credentials. * https://grpc.io/grpc/cpp/namespacegrpc.html#a96901c997b91bc6513b08491e0dca37c. @@ -75,38 +77,58 @@ export interface _envoy_api_v2_core_GrpcService_GoogleGrpc_CallCredentials__Outp * Service Account JWT Access credentials. * https://grpc.io/grpc/cpp/namespacegrpc.html#a92a9f959d6102461f66ee973d8e9d3aa. */ - 'service_account_jwt_access'?: (_envoy_api_v2_core_GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials__Output); + 'service_account_jwt_access'?: (_envoy_config_core_v3_GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials__Output | null); /** * Google IAM credentials. * https://grpc.io/grpc/cpp/namespacegrpc.html#a9fc1fc101b41e680d47028166e76f9d0. */ - 'google_iam'?: (_envoy_api_v2_core_GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials__Output); + 'google_iam'?: (_envoy_config_core_v3_GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials__Output | null); /** * Custom authenticator credentials. * https://grpc.io/grpc/cpp/namespacegrpc.html#a823c6a4b19ffc71fb33e90154ee2ad07. * https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other-authentication-mechanisms. */ - 'from_plugin'?: (_envoy_api_v2_core_GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin__Output); + 'from_plugin'?: (_envoy_config_core_v3_GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin__Output | null); /** * Custom security token service which implements OAuth 2.0 token exchange. * https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 * See https://github.com/grpc/grpc/pull/19587. */ - 'sts_service'?: (_envoy_api_v2_core_GrpcService_GoogleGrpc_CallCredentials_StsService__Output); + 'sts_service'?: (_envoy_config_core_v3_GrpcService_GoogleGrpc_CallCredentials_StsService__Output | null); 'credential_specifier': "access_token"|"google_compute_engine"|"google_refresh_token"|"service_account_jwt_access"|"google_iam"|"from_plugin"|"sts_service"; } +/** + * Channel arguments. + */ +export interface _envoy_config_core_v3_GrpcService_GoogleGrpc_ChannelArgs { + /** + * See grpc_types.h GRPC_ARG #defines for keys that work here. + */ + 'args'?: ({[key: string]: _envoy_config_core_v3_GrpcService_GoogleGrpc_ChannelArgs_Value}); +} + +/** + * Channel arguments. + */ +export interface _envoy_config_core_v3_GrpcService_GoogleGrpc_ChannelArgs__Output { + /** + * See grpc_types.h GRPC_ARG #defines for keys that work here. + */ + 'args': ({[key: string]: _envoy_config_core_v3_GrpcService_GoogleGrpc_ChannelArgs_Value__Output}); +} + /** * See https://grpc.io/docs/guides/auth.html#credential-types to understand Channel and Call * credential types. 
*/ -export interface _envoy_api_v2_core_GrpcService_GoogleGrpc_ChannelCredentials { - 'ssl_credentials'?: (_envoy_api_v2_core_GrpcService_GoogleGrpc_SslCredentials); +export interface _envoy_config_core_v3_GrpcService_GoogleGrpc_ChannelCredentials { + 'ssl_credentials'?: (_envoy_config_core_v3_GrpcService_GoogleGrpc_SslCredentials | null); /** * https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 */ - 'google_default'?: (_google_protobuf_Empty); - 'local_credentials'?: (_envoy_api_v2_core_GrpcService_GoogleGrpc_GoogleLocalCredentials); + 'google_default'?: (_google_protobuf_Empty | null); + 'local_credentials'?: (_envoy_config_core_v3_GrpcService_GoogleGrpc_GoogleLocalCredentials | null); 'credential_specifier'?: "ssl_credentials"|"google_default"|"local_credentials"; } @@ -114,50 +136,74 @@ export interface _envoy_api_v2_core_GrpcService_GoogleGrpc_ChannelCredentials { * See https://grpc.io/docs/guides/auth.html#credential-types to understand Channel and Call * credential types. */ -export interface _envoy_api_v2_core_GrpcService_GoogleGrpc_ChannelCredentials__Output { - 'ssl_credentials'?: (_envoy_api_v2_core_GrpcService_GoogleGrpc_SslCredentials__Output); +export interface _envoy_config_core_v3_GrpcService_GoogleGrpc_ChannelCredentials__Output { + 'ssl_credentials'?: (_envoy_config_core_v3_GrpcService_GoogleGrpc_SslCredentials__Output | null); /** * https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 */ - 'google_default'?: (_google_protobuf_Empty__Output); - 'local_credentials'?: (_envoy_api_v2_core_GrpcService_GoogleGrpc_GoogleLocalCredentials__Output); + 'google_default'?: (_google_protobuf_Empty__Output | null); + 'local_credentials'?: (_envoy_config_core_v3_GrpcService_GoogleGrpc_GoogleLocalCredentials__Output | null); 'credential_specifier': "ssl_credentials"|"google_default"|"local_credentials"; } -export interface _envoy_api_v2_core_GrpcService_EnvoyGrpc { +export interface _envoy_config_core_v3_GrpcService_EnvoyGrpc { /** * The name of the upstream gRPC cluster. SSL credentials will be supplied - * in the :ref:`Cluster ` :ref:`transport_socket - * `. + * in the :ref:`Cluster ` :ref:`transport_socket + * `. */ 'cluster_name'?: (string); + /** + * The ``:authority`` header in the grpc request. If this field is not set, the authority header value will be ``cluster_name``. + * Note that this authority does not override the SNI. The SNI is provided by the transport socket of the cluster. + */ + 'authority'?: (string); + /** + * Indicates the retry policy for re-establishing the gRPC stream + * This field is optional. If max interval is not provided, it will be set to ten times the provided base interval. + * Currently only supported for xDS gRPC streams. + * If not set, xDS gRPC streams default base interval:500ms, maximum interval:30s will be applied. + */ + 'retry_policy'?: (_envoy_config_core_v3_RetryPolicy | null); } -export interface _envoy_api_v2_core_GrpcService_EnvoyGrpc__Output { +export interface _envoy_config_core_v3_GrpcService_EnvoyGrpc__Output { /** * The name of the upstream gRPC cluster. SSL credentials will be supplied - * in the :ref:`Cluster ` :ref:`transport_socket - * `. + * in the :ref:`Cluster ` :ref:`transport_socket + * `. */ 'cluster_name': (string); + /** + * The ``:authority`` header in the grpc request. If this field is not set, the authority header value will be ``cluster_name``. + * Note that this authority does not override the SNI. The SNI is provided by the transport socket of the cluster. 
+ */ + 'authority': (string); + /** + * Indicates the retry policy for re-establishing the gRPC stream + * This field is optional. If max interval is not provided, it will be set to ten times the provided base interval. + * Currently only supported for xDS gRPC streams. + * If not set, xDS gRPC streams default base interval:500ms, maximum interval:30s will be applied. + */ + 'retry_policy': (_envoy_config_core_v3_RetryPolicy__Output | null); } /** - * [#next-free-field: 7] + * [#next-free-field: 9] */ -export interface _envoy_api_v2_core_GrpcService_GoogleGrpc { +export interface _envoy_config_core_v3_GrpcService_GoogleGrpc { /** * The target URI when using the `Google C++ gRPC client * `_. SSL credentials will be supplied in - * :ref:`channel_credentials `. + * :ref:`channel_credentials `. */ 'target_uri'?: (string); - 'channel_credentials'?: (_envoy_api_v2_core_GrpcService_GoogleGrpc_ChannelCredentials); + 'channel_credentials'?: (_envoy_config_core_v3_GrpcService_GoogleGrpc_ChannelCredentials | null); /** * A set of call credentials that can be composed with `channel credentials * `_. */ - 'call_credentials'?: (_envoy_api_v2_core_GrpcService_GoogleGrpc_CallCredentials)[]; + 'call_credentials'?: (_envoy_config_core_v3_GrpcService_GoogleGrpc_CallCredentials)[]; /** * The human readable prefix to use when emitting statistics for the gRPC * service. @@ -180,25 +226,34 @@ export interface _envoy_api_v2_core_GrpcService_GoogleGrpc { * Additional configuration for site-specific customizations of the Google * gRPC library. */ - 'config'?: (_google_protobuf_Struct); + 'config'?: (_google_protobuf_Struct | null); + /** + * How many bytes each stream can buffer internally. + * If not set an implementation defined default is applied (1MiB). + */ + 'per_stream_buffer_limit_bytes'?: (_google_protobuf_UInt32Value | null); + /** + * Custom channels args. + */ + 'channel_args'?: (_envoy_config_core_v3_GrpcService_GoogleGrpc_ChannelArgs | null); } /** - * [#next-free-field: 7] + * [#next-free-field: 9] */ -export interface _envoy_api_v2_core_GrpcService_GoogleGrpc__Output { +export interface _envoy_config_core_v3_GrpcService_GoogleGrpc__Output { /** * The target URI when using the `Google C++ gRPC client * `_. SSL credentials will be supplied in - * :ref:`channel_credentials `. + * :ref:`channel_credentials `. */ 'target_uri': (string); - 'channel_credentials'?: (_envoy_api_v2_core_GrpcService_GoogleGrpc_ChannelCredentials__Output); + 'channel_credentials': (_envoy_config_core_v3_GrpcService_GoogleGrpc_ChannelCredentials__Output | null); /** * A set of call credentials that can be composed with `channel credentials * `_. */ - 'call_credentials': (_envoy_api_v2_core_GrpcService_GoogleGrpc_CallCredentials__Output)[]; + 'call_credentials': (_envoy_config_core_v3_GrpcService_GoogleGrpc_CallCredentials__Output)[]; /** * The human readable prefix to use when emitting statistics for the gRPC * service. @@ -221,15 +276,24 @@ export interface _envoy_api_v2_core_GrpcService_GoogleGrpc__Output { * Additional configuration for site-specific customizations of the Google * gRPC library. */ - 'config'?: (_google_protobuf_Struct__Output); + 'config': (_google_protobuf_Struct__Output | null); + /** + * How many bytes each stream can buffer internally. + * If not set an implementation defined default is applied (1MiB). + */ + 'per_stream_buffer_limit_bytes': (_google_protobuf_UInt32Value__Output | null); + /** + * Custom channels args. 
+ */ + 'channel_args': (_envoy_config_core_v3_GrpcService_GoogleGrpc_ChannelArgs__Output | null); } -export interface _envoy_api_v2_core_GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials { +export interface _envoy_config_core_v3_GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials { 'authorization_token'?: (string); 'authority_selector'?: (string); } -export interface _envoy_api_v2_core_GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials__Output { +export interface _envoy_config_core_v3_GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials__Output { 'authorization_token': (string); 'authority_selector': (string); } @@ -238,36 +302,40 @@ export interface _envoy_api_v2_core_GrpcService_GoogleGrpc_CallCredentials_Googl * Local channel credentials. Only UDS is supported for now. * See https://github.com/grpc/grpc/pull/15909. */ -export interface _envoy_api_v2_core_GrpcService_GoogleGrpc_GoogleLocalCredentials { +export interface _envoy_config_core_v3_GrpcService_GoogleGrpc_GoogleLocalCredentials { } /** * Local channel credentials. Only UDS is supported for now. * See https://github.com/grpc/grpc/pull/15909. */ -export interface _envoy_api_v2_core_GrpcService_GoogleGrpc_GoogleLocalCredentials__Output { +export interface _envoy_config_core_v3_GrpcService_GoogleGrpc_GoogleLocalCredentials__Output { } -export interface _envoy_api_v2_core_GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin { +export interface _envoy_config_core_v3_GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin { 'name'?: (string); - 'config'?: (_google_protobuf_Struct); - 'typed_config'?: (_google_protobuf_Any); - 'config_type'?: "config"|"typed_config"; + 'typed_config'?: (_google_protobuf_Any | null); + /** + * [#extension-category: envoy.grpc_credentials] + */ + 'config_type'?: "typed_config"; } -export interface _envoy_api_v2_core_GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin__Output { +export interface _envoy_config_core_v3_GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin__Output { 'name': (string); - 'config'?: (_google_protobuf_Struct__Output); - 'typed_config'?: (_google_protobuf_Any__Output); - 'config_type': "config"|"typed_config"; + 'typed_config'?: (_google_protobuf_Any__Output | null); + /** + * [#extension-category: envoy.grpc_credentials] + */ + 'config_type': "typed_config"; } -export interface _envoy_api_v2_core_GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials { +export interface _envoy_config_core_v3_GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials { 'json_key'?: (string); 'token_lifetime_seconds'?: (number | string | Long); } -export interface _envoy_api_v2_core_GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials__Output { +export interface _envoy_config_core_v3_GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials__Output { 'json_key': (string); 'token_lifetime_seconds': (string); } @@ -275,37 +343,37 @@ export interface _envoy_api_v2_core_GrpcService_GoogleGrpc_CallCredentials_Servi /** * See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html. */ -export interface _envoy_api_v2_core_GrpcService_GoogleGrpc_SslCredentials { +export interface _envoy_config_core_v3_GrpcService_GoogleGrpc_SslCredentials { /** * PEM encoded server root certificates. 
*/ - 'root_certs'?: (_envoy_api_v2_core_DataSource); + 'root_certs'?: (_envoy_config_core_v3_DataSource | null); /** * PEM encoded client private key. */ - 'private_key'?: (_envoy_api_v2_core_DataSource); + 'private_key'?: (_envoy_config_core_v3_DataSource | null); /** * PEM encoded client certificate chain. */ - 'cert_chain'?: (_envoy_api_v2_core_DataSource); + 'cert_chain'?: (_envoy_config_core_v3_DataSource | null); } /** * See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html. */ -export interface _envoy_api_v2_core_GrpcService_GoogleGrpc_SslCredentials__Output { +export interface _envoy_config_core_v3_GrpcService_GoogleGrpc_SslCredentials__Output { /** * PEM encoded server root certificates. */ - 'root_certs'?: (_envoy_api_v2_core_DataSource__Output); + 'root_certs': (_envoy_config_core_v3_DataSource__Output | null); /** * PEM encoded client private key. */ - 'private_key'?: (_envoy_api_v2_core_DataSource__Output); + 'private_key': (_envoy_config_core_v3_DataSource__Output | null); /** * PEM encoded client certificate chain. */ - 'cert_chain'?: (_envoy_api_v2_core_DataSource__Output); + 'cert_chain': (_envoy_config_core_v3_DataSource__Output | null); } /** @@ -315,11 +383,11 @@ export interface _envoy_api_v2_core_GrpcService_GoogleGrpc_SslCredentials__Outpu * https://github.com/grpc/grpc/pull/19587. * [#next-free-field: 10] */ -export interface _envoy_api_v2_core_GrpcService_GoogleGrpc_CallCredentials_StsService { +export interface _envoy_config_core_v3_GrpcService_GoogleGrpc_CallCredentials_StsService { /** * URI of the token exchange service that handles token exchange requests. * [#comment:TODO(asraa): Add URI validation when implemented. Tracked by - * https://github.com/envoyproxy/protoc-gen-validate/issues/303] + * https://github.com/bufbuild/protoc-gen-validate/issues/303] */ 'token_exchange_service_uri'?: (string); /** @@ -369,11 +437,11 @@ export interface _envoy_api_v2_core_GrpcService_GoogleGrpc_CallCredentials_StsSe * https://github.com/grpc/grpc/pull/19587. * [#next-free-field: 10] */ -export interface _envoy_api_v2_core_GrpcService_GoogleGrpc_CallCredentials_StsService__Output { +export interface _envoy_config_core_v3_GrpcService_GoogleGrpc_CallCredentials_StsService__Output { /** * URI of the token exchange service that handles token exchange requests. * [#comment:TODO(asraa): Add URI validation when implemented. Tracked by - * https://github.com/envoyproxy/protoc-gen-validate/issues/303] + * https://github.com/bufbuild/protoc-gen-validate/issues/303] */ 'token_exchange_service_uri': (string); /** @@ -416,9 +484,29 @@ export interface _envoy_api_v2_core_GrpcService_GoogleGrpc_CallCredentials_StsSe 'actor_token_type': (string); } +export interface _envoy_config_core_v3_GrpcService_GoogleGrpc_ChannelArgs_Value { + 'string_value'?: (string); + 'int_value'?: (number | string | Long); + /** + * Pointer values are not supported, since they don't make any sense when + * delivered via the API. + */ + 'value_specifier'?: "string_value"|"int_value"; +} + +export interface _envoy_config_core_v3_GrpcService_GoogleGrpc_ChannelArgs_Value__Output { + 'string_value'?: (string); + 'int_value'?: (string); + /** + * Pointer values are not supported, since they don't make any sense when + * delivered via the API. + */ + 'value_specifier': "string_value"|"int_value"; +} + /** * gRPC service configuration. This is used by :ref:`ApiConfigSource - * ` and filter configurations. + * ` and filter configurations. 
* [#next-free-field: 6] */ export interface GrpcService { @@ -427,30 +515,32 @@ export interface GrpcService { * See the :ref:`gRPC services overview ` * documentation for discussion on gRPC client selection. */ - 'envoy_grpc'?: (_envoy_api_v2_core_GrpcService_EnvoyGrpc); + 'envoy_grpc'?: (_envoy_config_core_v3_GrpcService_EnvoyGrpc | null); /** * `Google C++ gRPC client `_ * See the :ref:`gRPC services overview ` * documentation for discussion on gRPC client selection. */ - 'google_grpc'?: (_envoy_api_v2_core_GrpcService_GoogleGrpc); + 'google_grpc'?: (_envoy_config_core_v3_GrpcService_GoogleGrpc | null); /** * The timeout for the gRPC request. This is the timeout for a specific * request. */ - 'timeout'?: (_google_protobuf_Duration); + 'timeout'?: (_google_protobuf_Duration | null); /** - * Additional metadata to include in streams initiated to the GrpcService. - * This can be used for scenarios in which additional ad hoc authorization - * headers (e.g. ``x-foo-bar: baz-key``) are to be injected. + * Additional metadata to include in streams initiated to the GrpcService. This can be used for + * scenarios in which additional ad hoc authorization headers (e.g. ``x-foo-bar: baz-key``) are to + * be injected. For more information, including details on header value syntax, see the + * documentation on :ref:`custom request headers + * `. */ - 'initial_metadata'?: (_envoy_api_v2_core_HeaderValue)[]; + 'initial_metadata'?: (_envoy_config_core_v3_HeaderValue)[]; 'target_specifier'?: "envoy_grpc"|"google_grpc"; } /** * gRPC service configuration. This is used by :ref:`ApiConfigSource - * ` and filter configurations. + * ` and filter configurations. * [#next-free-field: 6] */ export interface GrpcService__Output { @@ -459,23 +549,25 @@ export interface GrpcService__Output { * See the :ref:`gRPC services overview ` * documentation for discussion on gRPC client selection. */ - 'envoy_grpc'?: (_envoy_api_v2_core_GrpcService_EnvoyGrpc__Output); + 'envoy_grpc'?: (_envoy_config_core_v3_GrpcService_EnvoyGrpc__Output | null); /** * `Google C++ gRPC client `_ * See the :ref:`gRPC services overview ` * documentation for discussion on gRPC client selection. */ - 'google_grpc'?: (_envoy_api_v2_core_GrpcService_GoogleGrpc__Output); + 'google_grpc'?: (_envoy_config_core_v3_GrpcService_GoogleGrpc__Output | null); /** * The timeout for the gRPC request. This is the timeout for a specific * request. */ - 'timeout'?: (_google_protobuf_Duration__Output); + 'timeout': (_google_protobuf_Duration__Output | null); /** - * Additional metadata to include in streams initiated to the GrpcService. - * This can be used for scenarios in which additional ad hoc authorization - * headers (e.g. ``x-foo-bar: baz-key``) are to be injected. + * Additional metadata to include in streams initiated to the GrpcService. This can be used for + * scenarios in which additional ad hoc authorization headers (e.g. ``x-foo-bar: baz-key``) are to + * be injected. For more information, including details on header value syntax, see the + * documentation on :ref:`custom request headers + * `. 
*/ - 'initial_metadata': (_envoy_api_v2_core_HeaderValue__Output)[]; + 'initial_metadata': (_envoy_config_core_v3_HeaderValue__Output)[]; 'target_specifier': "envoy_grpc"|"google_grpc"; } diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/HeaderMap.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/HeaderMap.ts new file mode 100644 index 000000000..3ee496152 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/HeaderMap.ts @@ -0,0 +1,17 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/base.proto + +import type { HeaderValue as _envoy_config_core_v3_HeaderValue, HeaderValue__Output as _envoy_config_core_v3_HeaderValue__Output } from '../../../../envoy/config/core/v3/HeaderValue'; + +/** + * Wrapper for a set of headers. + */ +export interface HeaderMap { + 'headers'?: (_envoy_config_core_v3_HeaderValue)[]; +} + +/** + * Wrapper for a set of headers. + */ +export interface HeaderMap__Output { + 'headers': (_envoy_config_core_v3_HeaderValue__Output)[]; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/HeaderValue.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/HeaderValue.ts similarity index 89% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/core/HeaderValue.ts rename to packages/grpc-js-xds/src/generated/envoy/config/core/v3/HeaderValue.ts index b36605324..1cac90213 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/HeaderValue.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/HeaderValue.ts @@ -1,4 +1,4 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/base.proto +// Original file: deps/envoy-api/envoy/config/core/v3/base.proto /** @@ -14,7 +14,7 @@ export interface HeaderValue { * * The same :ref:`format specifier ` as used for * :ref:`HTTP access logging ` applies here, however - * unknown header values are replaced with the empty string instead of `-`. + * unknown header values are replaced with the empty string instead of ``-``. */ 'value'?: (string); } @@ -32,7 +32,7 @@ export interface HeaderValue__Output { * * The same :ref:`format specifier ` as used for * :ref:`HTTP access logging ` applies here, however - * unknown header values are replaced with the empty string instead of `-`. + * unknown header values are replaced with the empty string instead of ``-``. */ 'value': (string); } diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/HeaderValueOption.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/HeaderValueOption.ts new file mode 100644 index 000000000..e7a0a8d87 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/HeaderValueOption.ts @@ -0,0 +1,129 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/base.proto + +import type { HeaderValue as _envoy_config_core_v3_HeaderValue, HeaderValue__Output as _envoy_config_core_v3_HeaderValue__Output } from '../../../../envoy/config/core/v3/HeaderValue'; +import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../../google/protobuf/BoolValue'; + +// Original file: deps/envoy-api/envoy/config/core/v3/base.proto + +/** + * Describes the supported actions types for header append action. + */ +export const _envoy_config_core_v3_HeaderValueOption_HeaderAppendAction = { + /** + * This action will append the specified value to the existing values if the header + * already exists. If the header doesn't exist then this will add the header with + * specified key and value. 
+ */ + APPEND_IF_EXISTS_OR_ADD: 'APPEND_IF_EXISTS_OR_ADD', + /** + * This action will add the header if it doesn't already exist. If the header + * already exists then this will be a no-op. + */ + ADD_IF_ABSENT: 'ADD_IF_ABSENT', + /** + * This action will overwrite the specified value by discarding any existing values if + * the header already exists. If the header doesn't exist then this will add the header + * with specified key and value. + */ + OVERWRITE_IF_EXISTS_OR_ADD: 'OVERWRITE_IF_EXISTS_OR_ADD', +} as const; + +/** + * Describes the supported actions types for header append action. + */ +export type _envoy_config_core_v3_HeaderValueOption_HeaderAppendAction = + /** + * This action will append the specified value to the existing values if the header + * already exists. If the header doesn't exist then this will add the header with + * specified key and value. + */ + | 'APPEND_IF_EXISTS_OR_ADD' + | 0 + /** + * This action will add the header if it doesn't already exist. If the header + * already exists then this will be a no-op. + */ + | 'ADD_IF_ABSENT' + | 1 + /** + * This action will overwrite the specified value by discarding any existing values if + * the header already exists. If the header doesn't exist then this will add the header + * with specified key and value. + */ + | 'OVERWRITE_IF_EXISTS_OR_ADD' + | 2 + +/** + * Describes the supported actions types for header append action. + */ +export type _envoy_config_core_v3_HeaderValueOption_HeaderAppendAction__Output = typeof _envoy_config_core_v3_HeaderValueOption_HeaderAppendAction[keyof typeof _envoy_config_core_v3_HeaderValueOption_HeaderAppendAction] + +/** + * Header name/value pair plus option to control append behavior. + */ +export interface HeaderValueOption { + /** + * Header name/value pair that this option applies to. + */ + 'header'?: (_envoy_config_core_v3_HeaderValue | null); + /** + * Should the value be appended? If true (default), the value is appended to + * existing values. Otherwise it replaces any existing values. + * This field is deprecated and please use + * :ref:`append_action ` as replacement. + * + * .. note:: + * The :ref:`external authorization service ` and + * :ref:`external processor service ` have + * default value (``false``) for this field. + * @deprecated + */ + 'append'?: (_google_protobuf_BoolValue | null); + /** + * Describes the action taken to append/overwrite the given value for an existing header + * or to only add this header if it's absent. + * Value defaults to :ref:`APPEND_IF_EXISTS_OR_ADD + * `. + */ + 'append_action'?: (_envoy_config_core_v3_HeaderValueOption_HeaderAppendAction); + /** + * Is the header value allowed to be empty? If false (default), custom headers with empty values are dropped, + * otherwise they are added. + */ + 'keep_empty_value'?: (boolean); +} + +/** + * Header name/value pair plus option to control append behavior. + */ +export interface HeaderValueOption__Output { + /** + * Header name/value pair that this option applies to. + */ + 'header': (_envoy_config_core_v3_HeaderValue__Output | null); + /** + * Should the value be appended? If true (default), the value is appended to + * existing values. Otherwise it replaces any existing values. + * This field is deprecated and please use + * :ref:`append_action ` as replacement. + * + * .. note:: + * The :ref:`external authorization service ` and + * :ref:`external processor service ` have + * default value (``false``) for this field. 
+ * @deprecated + */ + 'append': (_google_protobuf_BoolValue__Output | null); + /** + * Describes the action taken to append/overwrite the given value for an existing header + * or to only add this header if it's absent. + * Value defaults to :ref:`APPEND_IF_EXISTS_OR_ADD + * `. + */ + 'append_action': (_envoy_config_core_v3_HeaderValueOption_HeaderAppendAction__Output); + /** + * Is the header value allowed to be empty? If false (default), custom headers with empty values are dropped, + * otherwise they are added. + */ + 'keep_empty_value': (boolean); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/HealthCheck.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/HealthCheck.ts new file mode 100644 index 000000000..f6605412e --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/HealthCheck.ts @@ -0,0 +1,779 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/health_check.proto + +import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../../google/protobuf/Duration'; +import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; +import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../../google/protobuf/BoolValue'; +import type { EventServiceConfig as _envoy_config_core_v3_EventServiceConfig, EventServiceConfig__Output as _envoy_config_core_v3_EventServiceConfig__Output } from '../../../../envoy/config/core/v3/EventServiceConfig'; +import type { Struct as _google_protobuf_Struct, Struct__Output as _google_protobuf_Struct__Output } from '../../../../google/protobuf/Struct'; +import type { TypedExtensionConfig as _envoy_config_core_v3_TypedExtensionConfig, TypedExtensionConfig__Output as _envoy_config_core_v3_TypedExtensionConfig__Output } from '../../../../envoy/config/core/v3/TypedExtensionConfig'; +import type { UInt64Value as _google_protobuf_UInt64Value, UInt64Value__Output as _google_protobuf_UInt64Value__Output } from '../../../../google/protobuf/UInt64Value'; +import type { HeaderValueOption as _envoy_config_core_v3_HeaderValueOption, HeaderValueOption__Output as _envoy_config_core_v3_HeaderValueOption__Output } from '../../../../envoy/config/core/v3/HeaderValueOption'; +import type { Int64Range as _envoy_type_v3_Int64Range, Int64Range__Output as _envoy_type_v3_Int64Range__Output } from '../../../../envoy/type/v3/Int64Range'; +import type { CodecClientType as _envoy_type_v3_CodecClientType, CodecClientType__Output as _envoy_type_v3_CodecClientType__Output } from '../../../../envoy/type/v3/CodecClientType'; +import type { StringMatcher as _envoy_type_matcher_v3_StringMatcher, StringMatcher__Output as _envoy_type_matcher_v3_StringMatcher__Output } from '../../../../envoy/type/matcher/v3/StringMatcher'; +import type { RequestMethod as _envoy_config_core_v3_RequestMethod, RequestMethod__Output as _envoy_config_core_v3_RequestMethod__Output } from '../../../../envoy/config/core/v3/RequestMethod'; +import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; + +/** + * Custom health check. + */ +export interface _envoy_config_core_v3_HealthCheck_CustomHealthCheck { + /** + * The registered name of the custom health checker. 
+ */ + 'name'?: (string); + 'typed_config'?: (_google_protobuf_Any | null); + /** + * A custom health checker specific configuration which depends on the custom health checker + * being instantiated. See :api:`envoy/config/health_checker` for reference. + * [#extension-category: envoy.health_checkers] + */ + 'config_type'?: "typed_config"; +} + +/** + * Custom health check. + */ +export interface _envoy_config_core_v3_HealthCheck_CustomHealthCheck__Output { + /** + * The registered name of the custom health checker. + */ + 'name': (string); + 'typed_config'?: (_google_protobuf_Any__Output | null); + /** + * A custom health checker specific configuration which depends on the custom health checker + * being instantiated. See :api:`envoy/config/health_checker` for reference. + * [#extension-category: envoy.health_checkers] + */ + 'config_type': "typed_config"; +} + +/** + * `grpc.health.v1.Health + * `_-based + * healthcheck. See `gRPC doc `_ + * for details. + */ +export interface _envoy_config_core_v3_HealthCheck_GrpcHealthCheck { + /** + * An optional service name parameter which will be sent to gRPC service in + * `grpc.health.v1.HealthCheckRequest + * `_. + * message. See `gRPC health-checking overview + * `_ for more information. + */ + 'service_name'?: (string); + /** + * The value of the :authority header in the gRPC health check request. If + * left empty (default value), the name of the cluster this health check is associated + * with will be used. The authority header can be customized for a specific endpoint by setting + * the :ref:`hostname ` field. + */ + 'authority'?: (string); + /** + * Specifies a list of key-value pairs that should be added to the metadata of each GRPC call + * that is sent to the health checked cluster. For more information, including details on header value syntax, + * see the documentation on :ref:`custom request headers + * `. + */ + 'initial_metadata'?: (_envoy_config_core_v3_HeaderValueOption)[]; +} + +/** + * `grpc.health.v1.Health + * `_-based + * healthcheck. See `gRPC doc `_ + * for details. + */ +export interface _envoy_config_core_v3_HealthCheck_GrpcHealthCheck__Output { + /** + * An optional service name parameter which will be sent to gRPC service in + * `grpc.health.v1.HealthCheckRequest + * `_. + * message. See `gRPC health-checking overview + * `_ for more information. + */ + 'service_name': (string); + /** + * The value of the :authority header in the gRPC health check request. If + * left empty (default value), the name of the cluster this health check is associated + * with will be used. The authority header can be customized for a specific endpoint by setting + * the :ref:`hostname ` field. + */ + 'authority': (string); + /** + * Specifies a list of key-value pairs that should be added to the metadata of each GRPC call + * that is sent to the health checked cluster. For more information, including details on header value syntax, + * see the documentation on :ref:`custom request headers + * `. + */ + 'initial_metadata': (_envoy_config_core_v3_HeaderValueOption__Output)[]; +} + +/** + * [#next-free-field: 15] + */ +export interface _envoy_config_core_v3_HealthCheck_HttpHealthCheck { + /** + * The value of the host header in the HTTP health check request. If + * left empty (default value), the name of the cluster this health check is associated + * with will be used. The host header can be customized for a specific endpoint by setting the + * :ref:`hostname ` field. 
+ */ + 'host'?: (string); + /** + * Specifies the HTTP path that will be requested during health checking. For example + * ``/healthcheck``. + */ + 'path'?: (string); + /** + * [#not-implemented-hide:] HTTP specific payload. + */ + 'send'?: (_envoy_config_core_v3_HealthCheck_Payload | null); + /** + * Specifies a list of HTTP expected responses to match in the first ``response_buffer_size`` bytes of the response body. + * If it is set, both the expected response check and status code determine the health check. + * When checking the response, “fuzzy” matching is performed such that each payload block must be found, + * and in the order specified, but not necessarily contiguous. + * + * .. note:: + * + * It is recommended to set ``response_buffer_size`` based on the total Payload size for efficiency. + * The default buffer size is 1024 bytes when it is not set. + */ + 'receive'?: (_envoy_config_core_v3_HealthCheck_Payload)[]; + /** + * Specifies the size of response buffer in bytes that is used to Payload match. + * The default value is 1024. Setting to 0 implies that the Payload will be matched against the entire response. + */ + 'response_buffer_size'?: (_google_protobuf_UInt64Value | null); + /** + * Specifies a list of HTTP headers that should be added to each request that is sent to the + * health checked cluster. For more information, including details on header value syntax, see + * the documentation on :ref:`custom request headers + * `. + */ + 'request_headers_to_add'?: (_envoy_config_core_v3_HeaderValueOption)[]; + /** + * Specifies a list of HTTP headers that should be removed from each request that is sent to the + * health checked cluster. + */ + 'request_headers_to_remove'?: (string)[]; + /** + * Specifies a list of HTTP response statuses considered healthy. If provided, replaces default + * 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open + * semantics of :ref:`Int64Range `. The start and end of each + * range are required. Only statuses in the range [100, 600) are allowed. + */ + 'expected_statuses'?: (_envoy_type_v3_Int64Range)[]; + /** + * Specifies a list of HTTP response statuses considered retriable. If provided, responses in this range + * will count towards the configured :ref:`unhealthy_threshold `, + * but will not result in the host being considered immediately unhealthy. Ranges follow half-open semantics of + * :ref:`Int64Range `. The start and end of each range are required. + * Only statuses in the range [100, 600) are allowed. The :ref:`expected_statuses ` + * field takes precedence for any range overlaps with this field i.e. if status code 200 is both retriable and expected, a 200 response will + * be considered a successful health check. By default all responses not in + * :ref:`expected_statuses ` will result in + * the host being considered immediately unhealthy i.e. if status code 200 is expected and there are no configured retriable statuses, any + * non-200 response will result in the host being marked unhealthy. + */ + 'retriable_statuses'?: (_envoy_type_v3_Int64Range)[]; + /** + * Use specified application protocol for health checks. + */ + 'codec_client_type'?: (_envoy_type_v3_CodecClientType); + /** + * An optional service name parameter which is used to validate the identity of + * the health checked cluster using a :ref:`StringMatcher + * `. See the :ref:`architecture overview + * ` for more information. 
+ */ + 'service_name_matcher'?: (_envoy_type_matcher_v3_StringMatcher | null); + /** + * HTTP Method that will be used for health checking, default is "GET". + * GET, HEAD, POST, PUT, DELETE, OPTIONS, TRACE, PATCH methods are supported, but making request body is not supported. + * CONNECT method is disallowed because it is not appropriate for health check request. + * If a non-200 response is expected by the method, it needs to be set in :ref:`expected_statuses `. + */ + 'method'?: (_envoy_config_core_v3_RequestMethod); +} + +/** + * [#next-free-field: 15] + */ +export interface _envoy_config_core_v3_HealthCheck_HttpHealthCheck__Output { + /** + * The value of the host header in the HTTP health check request. If + * left empty (default value), the name of the cluster this health check is associated + * with will be used. The host header can be customized for a specific endpoint by setting the + * :ref:`hostname ` field. + */ + 'host': (string); + /** + * Specifies the HTTP path that will be requested during health checking. For example + * ``/healthcheck``. + */ + 'path': (string); + /** + * [#not-implemented-hide:] HTTP specific payload. + */ + 'send': (_envoy_config_core_v3_HealthCheck_Payload__Output | null); + /** + * Specifies a list of HTTP expected responses to match in the first ``response_buffer_size`` bytes of the response body. + * If it is set, both the expected response check and status code determine the health check. + * When checking the response, “fuzzy” matching is performed such that each payload block must be found, + * and in the order specified, but not necessarily contiguous. + * + * .. note:: + * + * It is recommended to set ``response_buffer_size`` based on the total Payload size for efficiency. + * The default buffer size is 1024 bytes when it is not set. + */ + 'receive': (_envoy_config_core_v3_HealthCheck_Payload__Output)[]; + /** + * Specifies the size of response buffer in bytes that is used to Payload match. + * The default value is 1024. Setting to 0 implies that the Payload will be matched against the entire response. + */ + 'response_buffer_size': (_google_protobuf_UInt64Value__Output | null); + /** + * Specifies a list of HTTP headers that should be added to each request that is sent to the + * health checked cluster. For more information, including details on header value syntax, see + * the documentation on :ref:`custom request headers + * `. + */ + 'request_headers_to_add': (_envoy_config_core_v3_HeaderValueOption__Output)[]; + /** + * Specifies a list of HTTP headers that should be removed from each request that is sent to the + * health checked cluster. + */ + 'request_headers_to_remove': (string)[]; + /** + * Specifies a list of HTTP response statuses considered healthy. If provided, replaces default + * 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open + * semantics of :ref:`Int64Range `. The start and end of each + * range are required. Only statuses in the range [100, 600) are allowed. + */ + 'expected_statuses': (_envoy_type_v3_Int64Range__Output)[]; + /** + * Specifies a list of HTTP response statuses considered retriable. If provided, responses in this range + * will count towards the configured :ref:`unhealthy_threshold `, + * but will not result in the host being considered immediately unhealthy. Ranges follow half-open semantics of + * :ref:`Int64Range `. The start and end of each range are required. + * Only statuses in the range [100, 600) are allowed. 
The :ref:`expected_statuses ` + * field takes precedence for any range overlaps with this field i.e. if status code 200 is both retriable and expected, a 200 response will + * be considered a successful health check. By default all responses not in + * :ref:`expected_statuses ` will result in + * the host being considered immediately unhealthy i.e. if status code 200 is expected and there are no configured retriable statuses, any + * non-200 response will result in the host being marked unhealthy. + */ + 'retriable_statuses': (_envoy_type_v3_Int64Range__Output)[]; + /** + * Use specified application protocol for health checks. + */ + 'codec_client_type': (_envoy_type_v3_CodecClientType__Output); + /** + * An optional service name parameter which is used to validate the identity of + * the health checked cluster using a :ref:`StringMatcher + * `. See the :ref:`architecture overview + * ` for more information. + */ + 'service_name_matcher': (_envoy_type_matcher_v3_StringMatcher__Output | null); + /** + * HTTP Method that will be used for health checking, default is "GET". + * GET, HEAD, POST, PUT, DELETE, OPTIONS, TRACE, PATCH methods are supported, but making request body is not supported. + * CONNECT method is disallowed because it is not appropriate for health check request. + * If a non-200 response is expected by the method, it needs to be set in :ref:`expected_statuses `. + */ + 'method': (_envoy_config_core_v3_RequestMethod__Output); +} + +/** + * Describes the encoding of the payload bytes in the payload. + */ +export interface _envoy_config_core_v3_HealthCheck_Payload { + /** + * Hex encoded payload. E.g., "000000FF". + */ + 'text'?: (string); + /** + * Binary payload. + */ + 'binary'?: (Buffer | Uint8Array | string); + 'payload'?: "text"|"binary"; +} + +/** + * Describes the encoding of the payload bytes in the payload. + */ +export interface _envoy_config_core_v3_HealthCheck_Payload__Output { + /** + * Hex encoded payload. E.g., "000000FF". + */ + 'text'?: (string); + /** + * Binary payload. + */ + 'binary'?: (Buffer); + 'payload': "text"|"binary"; +} + +export interface _envoy_config_core_v3_HealthCheck_RedisHealthCheck { + /** + * If set, optionally perform ``EXISTS `` instead of ``PING``. A return value + * from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other + * than 0 is considered a failure. This allows the user to mark a Redis instance for maintenance + * by setting the specified key to any value and waiting for traffic to drain. + */ + 'key'?: (string); +} + +export interface _envoy_config_core_v3_HealthCheck_RedisHealthCheck__Output { + /** + * If set, optionally perform ``EXISTS `` instead of ``PING``. A return value + * from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other + * than 0 is considered a failure. This allows the user to mark a Redis instance for maintenance + * by setting the specified key to any value and waiting for traffic to drain. + */ + 'key': (string); +} + +export interface _envoy_config_core_v3_HealthCheck_TcpHealthCheck { + /** + * Empty payloads imply a connect-only health check. + */ + 'send'?: (_envoy_config_core_v3_HealthCheck_Payload | null); + /** + * When checking the response, “fuzzy” matching is performed such that each + * payload block must be found, and in the order specified, but not + * necessarily contiguous. 
+ */ + 'receive'?: (_envoy_config_core_v3_HealthCheck_Payload)[]; +} + +export interface _envoy_config_core_v3_HealthCheck_TcpHealthCheck__Output { + /** + * Empty payloads imply a connect-only health check. + */ + 'send': (_envoy_config_core_v3_HealthCheck_Payload__Output | null); + /** + * When checking the response, “fuzzy” matching is performed such that each + * payload block must be found, and in the order specified, but not + * necessarily contiguous. + */ + 'receive': (_envoy_config_core_v3_HealthCheck_Payload__Output)[]; +} + +/** + * Health checks occur over the transport socket specified for the cluster. This implies that if a + * cluster is using a TLS-enabled transport socket, the health check will also occur over TLS. + * + * This allows overriding the cluster TLS settings, just for health check connections. + */ +export interface _envoy_config_core_v3_HealthCheck_TlsOptions { + /** + * Specifies the ALPN protocols for health check connections. This is useful if the + * corresponding upstream is using ALPN-based :ref:`FilterChainMatch + * ` along with different protocols for health checks + * versus data connections. If empty, no ALPN protocols will be set on health check connections. + */ + 'alpn_protocols'?: (string)[]; +} + +/** + * Health checks occur over the transport socket specified for the cluster. This implies that if a + * cluster is using a TLS-enabled transport socket, the health check will also occur over TLS. + * + * This allows overriding the cluster TLS settings, just for health check connections. + */ +export interface _envoy_config_core_v3_HealthCheck_TlsOptions__Output { + /** + * Specifies the ALPN protocols for health check connections. This is useful if the + * corresponding upstream is using ALPN-based :ref:`FilterChainMatch + * ` along with different protocols for health checks + * versus data connections. If empty, no ALPN protocols will be set on health check connections. + */ + 'alpn_protocols': (string)[]; +} + +/** + * [#next-free-field: 26] + */ +export interface HealthCheck { + /** + * The time to wait for a health check response. If the timeout is reached the + * health check attempt will be considered a failure. + */ + 'timeout'?: (_google_protobuf_Duration | null); + /** + * The interval between health checks. + */ + 'interval'?: (_google_protobuf_Duration | null); + /** + * An optional jitter amount in milliseconds. If specified, during every + * interval Envoy will add interval_jitter to the wait time. + */ + 'interval_jitter'?: (_google_protobuf_Duration | null); + /** + * The number of unhealthy health checks required before a host is marked + * unhealthy. Note that for ``http`` health checking if a host responds with a code not in + * :ref:`expected_statuses ` + * or :ref:`retriable_statuses `, + * this threshold is ignored and the host is considered immediately unhealthy. + */ + 'unhealthy_threshold'?: (_google_protobuf_UInt32Value | null); + /** + * The number of healthy health checks required before a host is marked + * healthy. Note that during startup, only a single successful health check is + * required to mark a host healthy. + */ + 'healthy_threshold'?: (_google_protobuf_UInt32Value | null); + /** + * [#not-implemented-hide:] Non-serving port for health checking. + */ + 'alt_port'?: (_google_protobuf_UInt32Value | null); + /** + * Reuse health check connection between health checks. Default is true. + */ + 'reuse_connection'?: (_google_protobuf_BoolValue | null); + /** + * HTTP health check. 
+ */ + 'http_health_check'?: (_envoy_config_core_v3_HealthCheck_HttpHealthCheck | null); + /** + * TCP health check. + */ + 'tcp_health_check'?: (_envoy_config_core_v3_HealthCheck_TcpHealthCheck | null); + /** + * gRPC health check. + */ + 'grpc_health_check'?: (_envoy_config_core_v3_HealthCheck_GrpcHealthCheck | null); + /** + * The "no traffic interval" is a special health check interval that is used when a cluster has + * never had traffic routed to it. This lower interval allows cluster information to be kept up to + * date, without sending a potentially large amount of active health checking traffic for no + * reason. Once a cluster has been used for traffic routing, Envoy will shift back to using the + * standard health check interval that is defined. Note that this interval takes precedence over + * any other. + * + * The default value for "no traffic interval" is 60 seconds. + */ + 'no_traffic_interval'?: (_google_protobuf_Duration | null); + /** + * Custom health check. + */ + 'custom_health_check'?: (_envoy_config_core_v3_HealthCheck_CustomHealthCheck | null); + /** + * The "unhealthy interval" is a health check interval that is used for hosts that are marked as + * unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the + * standard health check interval that is defined. + * + * The default value for "unhealthy interval" is the same as "interval". + */ + 'unhealthy_interval'?: (_google_protobuf_Duration | null); + /** + * The "unhealthy edge interval" is a special health check interval that is used for the first + * health check right after a host is marked as unhealthy. For subsequent health checks + * Envoy will shift back to using either "unhealthy interval" if present or the standard health + * check interval that is defined. + * + * The default value for "unhealthy edge interval" is the same as "unhealthy interval". + */ + 'unhealthy_edge_interval'?: (_google_protobuf_Duration | null); + /** + * The "healthy edge interval" is a special health check interval that is used for the first + * health check right after a host is marked as healthy. For subsequent health checks + * Envoy will shift back to using the standard health check interval that is defined. + * + * The default value for "healthy edge interval" is the same as the default interval. + */ + 'healthy_edge_interval'?: (_google_protobuf_Duration | null); + /** + * .. attention:: + * This field is deprecated in favor of the extension + * :ref:`event_logger ` and + * :ref:`event_log_path ` + * in the file sink extension. + * + * Specifies the path to the :ref:`health check event log `. + * @deprecated + */ + 'event_log_path'?: (string); + /** + * An optional jitter amount as a percentage of interval_ms. If specified, + * during every interval Envoy will add ``interval_ms`` * + * ``interval_jitter_percent`` / 100 to the wait time. + * + * If interval_jitter_ms and interval_jitter_percent are both set, both of + * them will be used to increase the wait time. + */ + 'interval_jitter_percent'?: (number); + /** + * If set to true, health check failure events will always be logged. If set to false, only the + * initial health check failure event will be logged. + * The default value is false. + */ + 'always_log_health_check_failures'?: (boolean); + /** + * An optional jitter amount in milliseconds. If specified, Envoy will start health + * checking after for a random time in ms between 0 and initial_jitter. This only + * applies to the first health check. 
+ */ + 'initial_jitter'?: (_google_protobuf_Duration | null); + /** + * This allows overriding the cluster TLS settings, just for health check connections. + */ + 'tls_options'?: (_envoy_config_core_v3_HealthCheck_TlsOptions | null); + /** + * [#not-implemented-hide:] + * The gRPC service for the health check event service. + * If empty, health check events won't be sent to a remote endpoint. + */ + 'event_service'?: (_envoy_config_core_v3_EventServiceConfig | null); + /** + * Optional key/value pairs that will be used to match a transport socket from those specified in the cluster's + * :ref:`tranport socket matches `. + * For example, the following match criteria + * + * .. code-block:: yaml + * + * transport_socket_match_criteria: + * useMTLS: true + * + * Will match the following :ref:`cluster socket match ` + * + * .. code-block:: yaml + * + * transport_socket_matches: + * - name: "useMTLS" + * match: + * useMTLS: true + * transport_socket: + * name: envoy.transport_sockets.tls + * config: { ... } # tls socket configuration + * + * If this field is set, then for health checks it will supersede an entry of ``envoy.transport_socket`` in the + * :ref:`LbEndpoint.Metadata `. + * This allows using different transport socket capabilities for health checking versus proxying to the + * endpoint. + * + * If the key/values pairs specified do not match any + * :ref:`transport socket matches `, + * the cluster's :ref:`transport socket ` + * will be used for health check socket configuration. + */ + 'transport_socket_match_criteria'?: (_google_protobuf_Struct | null); + /** + * The "no traffic healthy interval" is a special health check interval that + * is used for hosts that are currently passing active health checking + * (including new hosts) when the cluster has received no traffic. + * + * This is useful for when we want to send frequent health checks with + * ``no_traffic_interval`` but then revert to lower frequency ``no_traffic_healthy_interval`` once + * a host in the cluster is marked as healthy. + * + * Once a cluster has been used for traffic routing, Envoy will shift back to using the + * standard health check interval that is defined. + * + * If no_traffic_healthy_interval is not set, it will default to the + * no traffic interval and send that interval regardless of health state. + */ + 'no_traffic_healthy_interval'?: (_google_protobuf_Duration | null); + /** + * A list of event log sinks to process the health check event. + * [#extension-category: envoy.health_check.event_sinks] + */ + 'event_logger'?: (_envoy_config_core_v3_TypedExtensionConfig)[]; + 'health_checker'?: "http_health_check"|"tcp_health_check"|"grpc_health_check"|"custom_health_check"; +} + +/** + * [#next-free-field: 26] + */ +export interface HealthCheck__Output { + /** + * The time to wait for a health check response. If the timeout is reached the + * health check attempt will be considered a failure. + */ + 'timeout': (_google_protobuf_Duration__Output | null); + /** + * The interval between health checks. + */ + 'interval': (_google_protobuf_Duration__Output | null); + /** + * An optional jitter amount in milliseconds. If specified, during every + * interval Envoy will add interval_jitter to the wait time. + */ + 'interval_jitter': (_google_protobuf_Duration__Output | null); + /** + * The number of unhealthy health checks required before a host is marked + * unhealthy. 
Note that for ``http`` health checking if a host responds with a code not in + * :ref:`expected_statuses ` + * or :ref:`retriable_statuses `, + * this threshold is ignored and the host is considered immediately unhealthy. + */ + 'unhealthy_threshold': (_google_protobuf_UInt32Value__Output | null); + /** + * The number of healthy health checks required before a host is marked + * healthy. Note that during startup, only a single successful health check is + * required to mark a host healthy. + */ + 'healthy_threshold': (_google_protobuf_UInt32Value__Output | null); + /** + * [#not-implemented-hide:] Non-serving port for health checking. + */ + 'alt_port': (_google_protobuf_UInt32Value__Output | null); + /** + * Reuse health check connection between health checks. Default is true. + */ + 'reuse_connection': (_google_protobuf_BoolValue__Output | null); + /** + * HTTP health check. + */ + 'http_health_check'?: (_envoy_config_core_v3_HealthCheck_HttpHealthCheck__Output | null); + /** + * TCP health check. + */ + 'tcp_health_check'?: (_envoy_config_core_v3_HealthCheck_TcpHealthCheck__Output | null); + /** + * gRPC health check. + */ + 'grpc_health_check'?: (_envoy_config_core_v3_HealthCheck_GrpcHealthCheck__Output | null); + /** + * The "no traffic interval" is a special health check interval that is used when a cluster has + * never had traffic routed to it. This lower interval allows cluster information to be kept up to + * date, without sending a potentially large amount of active health checking traffic for no + * reason. Once a cluster has been used for traffic routing, Envoy will shift back to using the + * standard health check interval that is defined. Note that this interval takes precedence over + * any other. + * + * The default value for "no traffic interval" is 60 seconds. + */ + 'no_traffic_interval': (_google_protobuf_Duration__Output | null); + /** + * Custom health check. + */ + 'custom_health_check'?: (_envoy_config_core_v3_HealthCheck_CustomHealthCheck__Output | null); + /** + * The "unhealthy interval" is a health check interval that is used for hosts that are marked as + * unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the + * standard health check interval that is defined. + * + * The default value for "unhealthy interval" is the same as "interval". + */ + 'unhealthy_interval': (_google_protobuf_Duration__Output | null); + /** + * The "unhealthy edge interval" is a special health check interval that is used for the first + * health check right after a host is marked as unhealthy. For subsequent health checks + * Envoy will shift back to using either "unhealthy interval" if present or the standard health + * check interval that is defined. + * + * The default value for "unhealthy edge interval" is the same as "unhealthy interval". + */ + 'unhealthy_edge_interval': (_google_protobuf_Duration__Output | null); + /** + * The "healthy edge interval" is a special health check interval that is used for the first + * health check right after a host is marked as healthy. For subsequent health checks + * Envoy will shift back to using the standard health check interval that is defined. + * + * The default value for "healthy edge interval" is the same as the default interval. + */ + 'healthy_edge_interval': (_google_protobuf_Duration__Output | null); + /** + * .. attention:: + * This field is deprecated in favor of the extension + * :ref:`event_logger ` and + * :ref:`event_log_path ` + * in the file sink extension. 
+ * + * Specifies the path to the :ref:`health check event log `. + * @deprecated + */ + 'event_log_path': (string); + /** + * An optional jitter amount as a percentage of interval_ms. If specified, + * during every interval Envoy will add ``interval_ms`` * + * ``interval_jitter_percent`` / 100 to the wait time. + * + * If interval_jitter_ms and interval_jitter_percent are both set, both of + * them will be used to increase the wait time. + */ + 'interval_jitter_percent': (number); + /** + * If set to true, health check failure events will always be logged. If set to false, only the + * initial health check failure event will be logged. + * The default value is false. + */ + 'always_log_health_check_failures': (boolean); + /** + * An optional jitter amount in milliseconds. If specified, Envoy will start health + * checking after for a random time in ms between 0 and initial_jitter. This only + * applies to the first health check. + */ + 'initial_jitter': (_google_protobuf_Duration__Output | null); + /** + * This allows overriding the cluster TLS settings, just for health check connections. + */ + 'tls_options': (_envoy_config_core_v3_HealthCheck_TlsOptions__Output | null); + /** + * [#not-implemented-hide:] + * The gRPC service for the health check event service. + * If empty, health check events won't be sent to a remote endpoint. + */ + 'event_service': (_envoy_config_core_v3_EventServiceConfig__Output | null); + /** + * Optional key/value pairs that will be used to match a transport socket from those specified in the cluster's + * :ref:`tranport socket matches `. + * For example, the following match criteria + * + * .. code-block:: yaml + * + * transport_socket_match_criteria: + * useMTLS: true + * + * Will match the following :ref:`cluster socket match ` + * + * .. code-block:: yaml + * + * transport_socket_matches: + * - name: "useMTLS" + * match: + * useMTLS: true + * transport_socket: + * name: envoy.transport_sockets.tls + * config: { ... } # tls socket configuration + * + * If this field is set, then for health checks it will supersede an entry of ``envoy.transport_socket`` in the + * :ref:`LbEndpoint.Metadata `. + * This allows using different transport socket capabilities for health checking versus proxying to the + * endpoint. + * + * If the key/values pairs specified do not match any + * :ref:`transport socket matches `, + * the cluster's :ref:`transport socket ` + * will be used for health check socket configuration. + */ + 'transport_socket_match_criteria': (_google_protobuf_Struct__Output | null); + /** + * The "no traffic healthy interval" is a special health check interval that + * is used for hosts that are currently passing active health checking + * (including new hosts) when the cluster has received no traffic. + * + * This is useful for when we want to send frequent health checks with + * ``no_traffic_interval`` but then revert to lower frequency ``no_traffic_healthy_interval`` once + * a host in the cluster is marked as healthy. + * + * Once a cluster has been used for traffic routing, Envoy will shift back to using the + * standard health check interval that is defined. + * + * If no_traffic_healthy_interval is not set, it will default to the + * no traffic interval and send that interval regardless of health state. + */ + 'no_traffic_healthy_interval': (_google_protobuf_Duration__Output | null); + /** + * A list of event log sinks to process the health check event. 
+ * [#extension-category: envoy.health_check.event_sinks] + */ + 'event_logger': (_envoy_config_core_v3_TypedExtensionConfig__Output)[]; + 'health_checker': "http_health_check"|"tcp_health_check"|"grpc_health_check"|"custom_health_check"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/HealthStatus.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/HealthStatus.ts new file mode 100644 index 000000000..54298f59b --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/HealthStatus.ts @@ -0,0 +1,81 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/health_check.proto + +/** + * Endpoint health status. + */ +export const HealthStatus = { + /** + * The health status is not known. This is interpreted by Envoy as ``HEALTHY``. + */ + UNKNOWN: 'UNKNOWN', + /** + * Healthy. + */ + HEALTHY: 'HEALTHY', + /** + * Unhealthy. + */ + UNHEALTHY: 'UNHEALTHY', + /** + * Connection draining in progress. E.g., + * ``_ + * or + * ``_. + * This is interpreted by Envoy as ``UNHEALTHY``. + */ + DRAINING: 'DRAINING', + /** + * Health check timed out. This is part of HDS and is interpreted by Envoy as + * ``UNHEALTHY``. + */ + TIMEOUT: 'TIMEOUT', + /** + * Degraded. + */ + DEGRADED: 'DEGRADED', +} as const; + +/** + * Endpoint health status. + */ +export type HealthStatus = + /** + * The health status is not known. This is interpreted by Envoy as ``HEALTHY``. + */ + | 'UNKNOWN' + | 0 + /** + * Healthy. + */ + | 'HEALTHY' + | 1 + /** + * Unhealthy. + */ + | 'UNHEALTHY' + | 2 + /** + * Connection draining in progress. E.g., + * ``_ + * or + * ``_. + * This is interpreted by Envoy as ``UNHEALTHY``. + */ + | 'DRAINING' + | 3 + /** + * Health check timed out. This is part of HDS and is interpreted by Envoy as + * ``UNHEALTHY``. + */ + | 'TIMEOUT' + | 4 + /** + * Degraded. + */ + | 'DEGRADED' + | 5 + +/** + * Endpoint health status. + */ +export type HealthStatus__Output = typeof HealthStatus[keyof typeof HealthStatus] diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/HealthStatusSet.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/HealthStatusSet.ts new file mode 100644 index 000000000..c94bf049c --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/HealthStatusSet.ts @@ -0,0 +1,17 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/health_check.proto + +import type { HealthStatus as _envoy_config_core_v3_HealthStatus, HealthStatus__Output as _envoy_config_core_v3_HealthStatus__Output } from '../../../../envoy/config/core/v3/HealthStatus'; + +export interface HealthStatusSet { + /** + * An order-independent set of health status. + */ + 'statuses'?: (_envoy_config_core_v3_HealthStatus)[]; +} + +export interface HealthStatusSet__Output { + /** + * An order-independent set of health status. 
+ */ + 'statuses': (_envoy_config_core_v3_HealthStatus__Output)[]; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/Http1ProtocolOptions.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/Http1ProtocolOptions.ts new file mode 100644 index 000000000..d141a946c --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/Http1ProtocolOptions.ts @@ -0,0 +1,244 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/protocol.proto + +import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../../google/protobuf/BoolValue'; +import type { TypedExtensionConfig as _envoy_config_core_v3_TypedExtensionConfig, TypedExtensionConfig__Output as _envoy_config_core_v3_TypedExtensionConfig__Output } from '../../../../envoy/config/core/v3/TypedExtensionConfig'; + +/** + * [#next-free-field: 9] + */ +export interface _envoy_config_core_v3_Http1ProtocolOptions_HeaderKeyFormat { + /** + * Formats the header by proper casing words: the first character and any character following + * a special character will be capitalized if it's an alpha character. For example, + * "content-type" becomes "Content-Type", and "foo$b#$are" becomes "Foo$B#$Are". + * Note that while this results in most headers following conventional casing, certain headers + * are not covered. For example, the "TE" header will be formatted as "Te". + */ + 'proper_case_words'?: (_envoy_config_core_v3_Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords | null); + /** + * Configuration for stateful formatter extensions that allow using received headers to + * affect the output of encoding headers. E.g., preserving case during proxying. + * [#extension-category: envoy.http.stateful_header_formatters] + */ + 'stateful_formatter'?: (_envoy_config_core_v3_TypedExtensionConfig | null); + 'header_format'?: "proper_case_words"|"stateful_formatter"; +} + +/** + * [#next-free-field: 9] + */ +export interface _envoy_config_core_v3_Http1ProtocolOptions_HeaderKeyFormat__Output { + /** + * Formats the header by proper casing words: the first character and any character following + * a special character will be capitalized if it's an alpha character. For example, + * "content-type" becomes "Content-Type", and "foo$b#$are" becomes "Foo$B#$Are". + * Note that while this results in most headers following conventional casing, certain headers + * are not covered. For example, the "TE" header will be formatted as "Te". + */ + 'proper_case_words'?: (_envoy_config_core_v3_Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords__Output | null); + /** + * Configuration for stateful formatter extensions that allow using received headers to + * affect the output of encoding headers. E.g., preserving case during proxying. + * [#extension-category: envoy.http.stateful_header_formatters] + */ + 'stateful_formatter'?: (_envoy_config_core_v3_TypedExtensionConfig__Output | null); + 'header_format': "proper_case_words"|"stateful_formatter"; +} + +export interface _envoy_config_core_v3_Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords { +} + +export interface _envoy_config_core_v3_Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords__Output { +} + +/** + * [#next-free-field: 11] + */ +export interface Http1ProtocolOptions { + /** + * Handle HTTP requests with absolute URLs in the requests. These requests + * are generally sent by clients to forward/explicit proxies. This allows clients to configure + * envoy as their HTTP proxy. 
In Unix, for example, this is typically done by setting the + * ``http_proxy`` environment variable. + */ + 'allow_absolute_url'?: (_google_protobuf_BoolValue | null); + /** + * Handle incoming HTTP/1.0 and HTTP 0.9 requests. + * This is off by default, and not fully standards compliant. There is support for pre-HTTP/1.1 + * style connect logic, dechunking, and handling lack of client host iff + * ``default_host_for_http_10`` is configured. + */ + 'accept_http_10'?: (boolean); + /** + * A default host for HTTP/1.0 requests. This is highly suggested if ``accept_http_10`` is true as + * Envoy does not otherwise support HTTP/1.0 without a Host header. + * This is a no-op if ``accept_http_10`` is not true. + */ + 'default_host_for_http_10'?: (string); + /** + * Describes how the keys for response headers should be formatted. By default, all header keys + * are lower cased. + */ + 'header_key_format'?: (_envoy_config_core_v3_Http1ProtocolOptions_HeaderKeyFormat | null); + /** + * Enables trailers for HTTP/1. By default the HTTP/1 codec drops proxied trailers. + * + * .. attention:: + * + * Note that this only happens when Envoy is chunk encoding which occurs when: + * - The request is HTTP/1.1. + * - Is neither a HEAD only request nor a HTTP Upgrade. + * - Not a response to a HEAD request. + * - The content length header is not present. + */ + 'enable_trailers'?: (boolean); + /** + * Allows Envoy to process requests/responses with both ``Content-Length`` and ``Transfer-Encoding`` + * headers set. By default such messages are rejected, but if option is enabled - Envoy will + * remove Content-Length header and process message. + * See `RFC7230, sec. 3.3.3 `_ for details. + * + * .. attention:: + * Enabling this option might lead to request smuggling vulnerability, especially if traffic + * is proxied via multiple layers of proxies. + * [#comment:TODO: This field is ignored when the + * :ref:`header validation configuration ` + * is present.] + */ + 'allow_chunked_length'?: (boolean); + /** + * Allows invalid HTTP messaging. When this option is false, then Envoy will terminate + * HTTP/1.1 connections upon receiving an invalid HTTP message. However, + * when this option is true, then Envoy will leave the HTTP/1.1 connection + * open where possible. + * If set, this overrides any HCM :ref:`stream_error_on_invalid_http_messaging + * `. + */ + 'override_stream_error_on_invalid_http_message'?: (_google_protobuf_BoolValue | null); + /** + * Allows sending fully qualified URLs when proxying the first line of the + * response. By default, Envoy will only send the path components in the first line. + * If this is true, Envoy will create a fully qualified URI composing scheme + * (inferred if not present), host (from the host/:authority header) and path + * (from first line or :path header). + */ + 'send_fully_qualified_url'?: (boolean); + /** + * [#not-implemented-hide:] Hiding so that field can be removed after BalsaParser is rolled out. + * If set, force HTTP/1 parser: BalsaParser if true, http-parser if false. + * If unset, HTTP/1 parser is selected based on + * envoy.reloadable_features.http1_use_balsa_parser. + * See issue #21245. + */ + 'use_balsa_parser'?: (_google_protobuf_BoolValue | null); + /** + * [#not-implemented-hide:] Hiding so that field can be removed. 
+ * If true, and BalsaParser is used (either `use_balsa_parser` above is true, + * or `envoy.reloadable_features.http1_use_balsa_parser` is true and + * `use_balsa_parser` is unset), then every non-empty method with only valid + * characters is accepted. Otherwise, methods not on the hard-coded list are + * rejected. + * Once UHV is enabled, this field should be removed, and BalsaParser should + * allow any method. UHV validates the method, rejecting empty string or + * invalid characters, and provides :ref:`restrict_http_methods + * ` + * to reject custom methods. + */ + 'allow_custom_methods'?: (boolean); +} + +/** + * [#next-free-field: 11] + */ +export interface Http1ProtocolOptions__Output { + /** + * Handle HTTP requests with absolute URLs in the requests. These requests + * are generally sent by clients to forward/explicit proxies. This allows clients to configure + * envoy as their HTTP proxy. In Unix, for example, this is typically done by setting the + * ``http_proxy`` environment variable. + */ + 'allow_absolute_url': (_google_protobuf_BoolValue__Output | null); + /** + * Handle incoming HTTP/1.0 and HTTP 0.9 requests. + * This is off by default, and not fully standards compliant. There is support for pre-HTTP/1.1 + * style connect logic, dechunking, and handling lack of client host iff + * ``default_host_for_http_10`` is configured. + */ + 'accept_http_10': (boolean); + /** + * A default host for HTTP/1.0 requests. This is highly suggested if ``accept_http_10`` is true as + * Envoy does not otherwise support HTTP/1.0 without a Host header. + * This is a no-op if ``accept_http_10`` is not true. + */ + 'default_host_for_http_10': (string); + /** + * Describes how the keys for response headers should be formatted. By default, all header keys + * are lower cased. + */ + 'header_key_format': (_envoy_config_core_v3_Http1ProtocolOptions_HeaderKeyFormat__Output | null); + /** + * Enables trailers for HTTP/1. By default the HTTP/1 codec drops proxied trailers. + * + * .. attention:: + * + * Note that this only happens when Envoy is chunk encoding which occurs when: + * - The request is HTTP/1.1. + * - Is neither a HEAD only request nor a HTTP Upgrade. + * - Not a response to a HEAD request. + * - The content length header is not present. + */ + 'enable_trailers': (boolean); + /** + * Allows Envoy to process requests/responses with both ``Content-Length`` and ``Transfer-Encoding`` + * headers set. By default such messages are rejected, but if option is enabled - Envoy will + * remove Content-Length header and process message. + * See `RFC7230, sec. 3.3.3 `_ for details. + * + * .. attention:: + * Enabling this option might lead to request smuggling vulnerability, especially if traffic + * is proxied via multiple layers of proxies. + * [#comment:TODO: This field is ignored when the + * :ref:`header validation configuration ` + * is present.] + */ + 'allow_chunked_length': (boolean); + /** + * Allows invalid HTTP messaging. When this option is false, then Envoy will terminate + * HTTP/1.1 connections upon receiving an invalid HTTP message. However, + * when this option is true, then Envoy will leave the HTTP/1.1 connection + * open where possible. + * If set, this overrides any HCM :ref:`stream_error_on_invalid_http_messaging + * `. + */ + 'override_stream_error_on_invalid_http_message': (_google_protobuf_BoolValue__Output | null); + /** + * Allows sending fully qualified URLs when proxying the first line of the + * response. 
By default, Envoy will only send the path components in the first line. + * If this is true, Envoy will create a fully qualified URI composing scheme + * (inferred if not present), host (from the host/:authority header) and path + * (from first line or :path header). + */ + 'send_fully_qualified_url': (boolean); + /** + * [#not-implemented-hide:] Hiding so that field can be removed after BalsaParser is rolled out. + * If set, force HTTP/1 parser: BalsaParser if true, http-parser if false. + * If unset, HTTP/1 parser is selected based on + * envoy.reloadable_features.http1_use_balsa_parser. + * See issue #21245. + */ + 'use_balsa_parser': (_google_protobuf_BoolValue__Output | null); + /** + * [#not-implemented-hide:] Hiding so that field can be removed. + * If true, and BalsaParser is used (either `use_balsa_parser` above is true, + * or `envoy.reloadable_features.http1_use_balsa_parser` is true and + * `use_balsa_parser` is unset), then every non-empty method with only valid + * characters is accepted. Otherwise, methods not on the hard-coded list are + * rejected. + * Once UHV is enabled, this field should be removed, and BalsaParser should + * allow any method. UHV validates the method, rejecting empty string or + * invalid characters, and provides :ref:`restrict_http_methods + * ` + * to reject custom methods. + */ + 'allow_custom_methods': (boolean); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/Http2ProtocolOptions.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/Http2ProtocolOptions.ts similarity index 57% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/core/Http2ProtocolOptions.ts rename to packages/grpc-js-xds/src/generated/envoy/config/core/v3/Http2ProtocolOptions.ts index 893f61a15..9e0ae3d6e 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/Http2ProtocolOptions.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/Http2ProtocolOptions.ts @@ -1,39 +1,41 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/protocol.proto +// Original file: deps/envoy-api/envoy/config/core/v3/protocol.proto import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; +import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../../google/protobuf/BoolValue'; +import type { KeepaliveSettings as _envoy_config_core_v3_KeepaliveSettings, KeepaliveSettings__Output as _envoy_config_core_v3_KeepaliveSettings__Output } from '../../../../envoy/config/core/v3/KeepaliveSettings'; /** * Defines a parameter to be sent in the SETTINGS frame. * See `RFC7540, sec. 6.5.1 `_ for details. */ -export interface _envoy_api_v2_core_Http2ProtocolOptions_SettingsParameter { +export interface _envoy_config_core_v3_Http2ProtocolOptions_SettingsParameter { /** * The 16 bit parameter identifier. */ - 'identifier'?: (_google_protobuf_UInt32Value); + 'identifier'?: (_google_protobuf_UInt32Value | null); /** * The 32 bit parameter value. */ - 'value'?: (_google_protobuf_UInt32Value); + 'value'?: (_google_protobuf_UInt32Value | null); } /** * Defines a parameter to be sent in the SETTINGS frame. * See `RFC7540, sec. 6.5.1 `_ for details. */ -export interface _envoy_api_v2_core_Http2ProtocolOptions_SettingsParameter__Output { +export interface _envoy_config_core_v3_Http2ProtocolOptions_SettingsParameter__Output { /** * The 16 bit parameter identifier. 
*/ - 'identifier'?: (_google_protobuf_UInt32Value__Output); + 'identifier': (_google_protobuf_UInt32Value__Output | null); /** * The 32 bit parameter value. */ - 'value'?: (_google_protobuf_UInt32Value__Output); + 'value': (_google_protobuf_UInt32Value__Output | null); } /** - * [#next-free-field: 14] + * [#next-free-field: 17] */ export interface Http2ProtocolOptions { /** @@ -42,7 +44,7 @@ export interface Http2ProtocolOptions { * range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively disables header * compression. */ - 'hpack_table_size'?: (_google_protobuf_UInt32Value); + 'hpack_table_size'?: (_google_protobuf_UInt32Value | null); /** * `Maximum concurrent streams `_ * allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1) @@ -51,8 +53,12 @@ export interface Http2ProtocolOptions { * For upstream connections, this also limits how many streams Envoy will initiate concurrently * on a single connection. If the limit is reached, Envoy may queue requests or establish * additional connections (as allowed per circuit breaker limits). + * + * This acts as an upper bound: Envoy will lower the max concurrent streams allowed on a given + * connection based on upstream settings. Config dumps will reflect the configured upper bound, + * not the per-connection negotiated limits. */ - 'max_concurrent_streams'?: (_google_protobuf_UInt32Value); + 'max_concurrent_streams'?: (_google_protobuf_UInt32Value | null); /** * `Initial stream-level flow-control window * `_ size. Valid values range from 65535 @@ -66,12 +72,12 @@ export interface Http2ProtocolOptions { * HTTP/2 codec buffers. Once the buffer reaches this pointer, watermark callbacks will fire to * stop the flow of data to the codec buffers. */ - 'initial_stream_window_size'?: (_google_protobuf_UInt32Value); + 'initial_stream_window_size'?: (_google_protobuf_UInt32Value | null); /** - * Similar to *initial_stream_window_size*, but for connection-level flow-control - * window. Currently, this has the same minimum/maximum/default as *initial_stream_window_size*. + * Similar to ``initial_stream_window_size``, but for connection-level flow-control + * window. Currently, this has the same minimum/maximum/default as ``initial_stream_window_size``. */ - 'initial_connection_window_size'?: (_google_protobuf_UInt32Value); + 'initial_connection_window_size'?: (_google_protobuf_UInt32Value | null); /** * Allows proxying Websocket and other upgrades over H2 connect. */ @@ -81,7 +87,7 @@ export interface Http2ProtocolOptions { * Still under implementation. DO NOT USE. * * Allows metadata. See [metadata - * docs](https://github.com/envoyproxy/envoy/blob/master/source/docs/h2_metadata.md) for more + * docs](https://github.com/envoyproxy/envoy/blob/main/source/docs/h2_metadata.md) for more * information. */ 'allow_metadata'?: (boolean); @@ -90,18 +96,16 @@ export interface Http2ProtocolOptions { * be written into the socket). Exceeding this limit triggers flood mitigation and connection is * terminated. The ``http2.outbound_flood`` stat tracks the number of terminated connections due * to flood mitigation. The default limit is 10000. - * [#comment:TODO: implement same limits for upstream outbound frames as well.] 
*/ - 'max_outbound_frames'?: (_google_protobuf_UInt32Value); + 'max_outbound_frames'?: (_google_protobuf_UInt32Value | null); /** * Limit the number of pending outbound downstream frames of types PING, SETTINGS and RST_STREAM, * preventing high memory utilization when receiving continuous stream of these frames. Exceeding * this limit triggers flood mitigation and connection is terminated. The * ``http2.outbound_control_flood`` stat tracks the number of terminated connections due to flood * mitigation. The default limit is 1000. - * [#comment:TODO: implement same limits for upstream outbound frames as well.] */ - 'max_outbound_control_frames'?: (_google_protobuf_UInt32Value); + 'max_outbound_control_frames'?: (_google_protobuf_UInt32Value | null); /** * Limit the number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA with an * empty payload and no end stream flag. Those frames have no legitimate use and are abusive, but @@ -109,42 +113,53 @@ export interface Http2ProtocolOptions { * stat tracks the number of connections terminated due to flood mitigation. * Setting this to 0 will terminate connection upon receiving first frame with an empty payload * and no end stream flag. The default limit is 1. - * [#comment:TODO: implement same limits for upstream inbound frames as well.] */ - 'max_consecutive_inbound_frames_with_empty_payload'?: (_google_protobuf_UInt32Value); + 'max_consecutive_inbound_frames_with_empty_payload'?: (_google_protobuf_UInt32Value | null); /** * Limit the number of inbound PRIORITY frames allowed per each opened stream. If the number * of PRIORITY frames received over the lifetime of connection exceeds the value calculated * using this formula:: * - * max_inbound_priority_frames_per_stream * (1 + inbound_streams) + * ``max_inbound_priority_frames_per_stream`` * (1 + ``opened_streams``) * - * the connection is terminated. The ``http2.inbound_priority_frames_flood`` stat tracks + * the connection is terminated. For downstream connections the ``opened_streams`` is incremented when + * Envoy receives complete response headers from the upstream server. For upstream connection the + * ``opened_streams`` is incremented when Envoy send the HEADERS frame for a new stream. The + * ``http2.inbound_priority_frames_flood`` stat tracks * the number of connections terminated due to flood mitigation. The default limit is 100. - * [#comment:TODO: implement same limits for upstream inbound frames as well.] */ - 'max_inbound_priority_frames_per_stream'?: (_google_protobuf_UInt32Value); + 'max_inbound_priority_frames_per_stream'?: (_google_protobuf_UInt32Value | null); /** * Limit the number of inbound WINDOW_UPDATE frames allowed per DATA frame sent. If the number * of WINDOW_UPDATE frames received over the lifetime of connection exceeds the value calculated * using this formula:: * - * 1 + 2 * (inbound_streams + - * max_inbound_window_update_frames_per_data_frame_sent * outbound_data_frames) + * 5 + 2 * (``opened_streams`` + + * ``max_inbound_window_update_frames_per_data_frame_sent`` * ``outbound_data_frames``) * - * the connection is terminated. The ``http2.inbound_priority_frames_flood`` stat tracks - * the number of connections terminated due to flood mitigation. The default limit is 10. + * the connection is terminated. For downstream connections the ``opened_streams`` is incremented when + * Envoy receives complete response headers from the upstream server. 
For upstream connections the + * ``opened_streams`` is incremented when Envoy sends the HEADERS frame for a new stream. The + * ``http2.inbound_priority_frames_flood`` stat tracks the number of connections terminated due to + * flood mitigation. The default max_inbound_window_update_frames_per_data_frame_sent value is 10. * Setting this to 1 should be enough to support HTTP/2 implementations with basic flow control, * but more complex implementations that try to estimate available bandwidth require at least 2. - * [#comment:TODO: implement same limits for upstream inbound frames as well.] */ - 'max_inbound_window_update_frames_per_data_frame_sent'?: (_google_protobuf_UInt32Value); + 'max_inbound_window_update_frames_per_data_frame_sent'?: (_google_protobuf_UInt32Value | null); /** * Allows invalid HTTP messaging and headers. When this option is disabled (default), then * the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, * when this option is enabled, only the offending stream is terminated. * + * This is overridden by HCM :ref:`stream_error_on_invalid_http_messaging + * ` + * iff present. + * + * This is deprecated in favor of :ref:`override_stream_error_on_invalid_http_message + * ` + * * See `RFC7540, sec. 8.1 `_ for details. + * @deprecated */ 'stream_error_on_invalid_http_messaging'?: (boolean); /** @@ -175,11 +190,33 @@ export interface Http2ProtocolOptions { * `_ for * standardized identifiers. */ - 'custom_settings_parameters'?: (_envoy_api_v2_core_Http2ProtocolOptions_SettingsParameter)[]; + 'custom_settings_parameters'?: (_envoy_config_core_v3_Http2ProtocolOptions_SettingsParameter)[]; + /** + * Allows invalid HTTP messaging and headers. When this option is disabled (default), then + * the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, + * when this option is enabled, only the offending stream is terminated. + * + * This overrides any HCM :ref:`stream_error_on_invalid_http_messaging + * ` + * + * See `RFC7540, sec. 8.1 `_ for details. + */ + 'override_stream_error_on_invalid_http_message'?: (_google_protobuf_BoolValue | null); + /** + * Send HTTP/2 PING frames to verify that the connection is still healthy. If the remote peer + * does not respond within the configured timeout, the connection will be aborted. + */ + 'connection_keepalive'?: (_envoy_config_core_v3_KeepaliveSettings | null); + /** + * [#not-implemented-hide:] Hiding so that the field can be removed after oghttp2 is rolled out. + * If set, force use of a particular HTTP/2 codec: oghttp2 if true, nghttp2 if false. + * If unset, HTTP/2 codec is selected based on envoy.reloadable_features.http2_use_oghttp2. + */ + 'use_oghttp2_codec'?: (_google_protobuf_BoolValue | null); } /** - * [#next-free-field: 14] + * [#next-free-field: 17] */ export interface Http2ProtocolOptions__Output { /** @@ -188,7 +225,7 @@ export interface Http2ProtocolOptions__Output { * range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively disables header * compression. */ - 'hpack_table_size'?: (_google_protobuf_UInt32Value__Output); + 'hpack_table_size': (_google_protobuf_UInt32Value__Output | null); /** * `Maximum concurrent streams `_ * allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1) @@ -197,8 +234,12 @@ export interface Http2ProtocolOptions__Output { * For upstream connections, this also limits how many streams Envoy will initiate concurrently * on a single connection. 
If the limit is reached, Envoy may queue requests or establish * additional connections (as allowed per circuit breaker limits). + * + * This acts as an upper bound: Envoy will lower the max concurrent streams allowed on a given + * connection based on upstream settings. Config dumps will reflect the configured upper bound, + * not the per-connection negotiated limits. */ - 'max_concurrent_streams'?: (_google_protobuf_UInt32Value__Output); + 'max_concurrent_streams': (_google_protobuf_UInt32Value__Output | null); /** * `Initial stream-level flow-control window * `_ size. Valid values range from 65535 @@ -212,12 +253,12 @@ export interface Http2ProtocolOptions__Output { * HTTP/2 codec buffers. Once the buffer reaches this pointer, watermark callbacks will fire to * stop the flow of data to the codec buffers. */ - 'initial_stream_window_size'?: (_google_protobuf_UInt32Value__Output); + 'initial_stream_window_size': (_google_protobuf_UInt32Value__Output | null); /** - * Similar to *initial_stream_window_size*, but for connection-level flow-control - * window. Currently, this has the same minimum/maximum/default as *initial_stream_window_size*. + * Similar to ``initial_stream_window_size``, but for connection-level flow-control + * window. Currently, this has the same minimum/maximum/default as ``initial_stream_window_size``. */ - 'initial_connection_window_size'?: (_google_protobuf_UInt32Value__Output); + 'initial_connection_window_size': (_google_protobuf_UInt32Value__Output | null); /** * Allows proxying Websocket and other upgrades over H2 connect. */ @@ -227,7 +268,7 @@ export interface Http2ProtocolOptions__Output { * Still under implementation. DO NOT USE. * * Allows metadata. See [metadata - * docs](https://github.com/envoyproxy/envoy/blob/master/source/docs/h2_metadata.md) for more + * docs](https://github.com/envoyproxy/envoy/blob/main/source/docs/h2_metadata.md) for more * information. */ 'allow_metadata': (boolean); @@ -236,18 +277,16 @@ export interface Http2ProtocolOptions__Output { * be written into the socket). Exceeding this limit triggers flood mitigation and connection is * terminated. The ``http2.outbound_flood`` stat tracks the number of terminated connections due * to flood mitigation. The default limit is 10000. - * [#comment:TODO: implement same limits for upstream outbound frames as well.] */ - 'max_outbound_frames'?: (_google_protobuf_UInt32Value__Output); + 'max_outbound_frames': (_google_protobuf_UInt32Value__Output | null); /** * Limit the number of pending outbound downstream frames of types PING, SETTINGS and RST_STREAM, * preventing high memory utilization when receiving continuous stream of these frames. Exceeding * this limit triggers flood mitigation and connection is terminated. The * ``http2.outbound_control_flood`` stat tracks the number of terminated connections due to flood * mitigation. The default limit is 1000. - * [#comment:TODO: implement same limits for upstream outbound frames as well.] */ - 'max_outbound_control_frames'?: (_google_protobuf_UInt32Value__Output); + 'max_outbound_control_frames': (_google_protobuf_UInt32Value__Output | null); /** * Limit the number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA with an * empty payload and no end stream flag. Those frames have no legitimate use and are abusive, but @@ -255,42 +294,53 @@ export interface Http2ProtocolOptions__Output { * stat tracks the number of connections terminated due to flood mitigation. 
* Setting this to 0 will terminate connection upon receiving first frame with an empty payload * and no end stream flag. The default limit is 1. - * [#comment:TODO: implement same limits for upstream inbound frames as well.] */ - 'max_consecutive_inbound_frames_with_empty_payload'?: (_google_protobuf_UInt32Value__Output); + 'max_consecutive_inbound_frames_with_empty_payload': (_google_protobuf_UInt32Value__Output | null); /** * Limit the number of inbound PRIORITY frames allowed per each opened stream. If the number * of PRIORITY frames received over the lifetime of connection exceeds the value calculated * using this formula:: * - * max_inbound_priority_frames_per_stream * (1 + inbound_streams) + * ``max_inbound_priority_frames_per_stream`` * (1 + ``opened_streams``) * - * the connection is terminated. The ``http2.inbound_priority_frames_flood`` stat tracks + * the connection is terminated. For downstream connections the ``opened_streams`` is incremented when + * Envoy receives complete response headers from the upstream server. For upstream connection the + * ``opened_streams`` is incremented when Envoy send the HEADERS frame for a new stream. The + * ``http2.inbound_priority_frames_flood`` stat tracks * the number of connections terminated due to flood mitigation. The default limit is 100. - * [#comment:TODO: implement same limits for upstream inbound frames as well.] */ - 'max_inbound_priority_frames_per_stream'?: (_google_protobuf_UInt32Value__Output); + 'max_inbound_priority_frames_per_stream': (_google_protobuf_UInt32Value__Output | null); /** * Limit the number of inbound WINDOW_UPDATE frames allowed per DATA frame sent. If the number * of WINDOW_UPDATE frames received over the lifetime of connection exceeds the value calculated * using this formula:: * - * 1 + 2 * (inbound_streams + - * max_inbound_window_update_frames_per_data_frame_sent * outbound_data_frames) + * 5 + 2 * (``opened_streams`` + + * ``max_inbound_window_update_frames_per_data_frame_sent`` * ``outbound_data_frames``) * - * the connection is terminated. The ``http2.inbound_priority_frames_flood`` stat tracks - * the number of connections terminated due to flood mitigation. The default limit is 10. + * the connection is terminated. For downstream connections the ``opened_streams`` is incremented when + * Envoy receives complete response headers from the upstream server. For upstream connections the + * ``opened_streams`` is incremented when Envoy sends the HEADERS frame for a new stream. The + * ``http2.inbound_priority_frames_flood`` stat tracks the number of connections terminated due to + * flood mitigation. The default max_inbound_window_update_frames_per_data_frame_sent value is 10. * Setting this to 1 should be enough to support HTTP/2 implementations with basic flow control, * but more complex implementations that try to estimate available bandwidth require at least 2. - * [#comment:TODO: implement same limits for upstream inbound frames as well.] */ - 'max_inbound_window_update_frames_per_data_frame_sent'?: (_google_protobuf_UInt32Value__Output); + 'max_inbound_window_update_frames_per_data_frame_sent': (_google_protobuf_UInt32Value__Output | null); /** * Allows invalid HTTP messaging and headers. When this option is disabled (default), then * the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, * when this option is enabled, only the offending stream is terminated. * + * This is overridden by HCM :ref:`stream_error_on_invalid_http_messaging + * ` + * iff present. 
+ * + * This is deprecated in favor of :ref:`override_stream_error_on_invalid_http_message + * ` + * * See `RFC7540, sec. 8.1 `_ for details. + * @deprecated */ 'stream_error_on_invalid_http_messaging': (boolean); /** @@ -321,5 +371,27 @@ export interface Http2ProtocolOptions__Output { * `_ for * standardized identifiers. */ - 'custom_settings_parameters': (_envoy_api_v2_core_Http2ProtocolOptions_SettingsParameter__Output)[]; + 'custom_settings_parameters': (_envoy_config_core_v3_Http2ProtocolOptions_SettingsParameter__Output)[]; + /** + * Allows invalid HTTP messaging and headers. When this option is disabled (default), then + * the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, + * when this option is enabled, only the offending stream is terminated. + * + * This overrides any HCM :ref:`stream_error_on_invalid_http_messaging + * ` + * + * See `RFC7540, sec. 8.1 `_ for details. + */ + 'override_stream_error_on_invalid_http_message': (_google_protobuf_BoolValue__Output | null); + /** + * Send HTTP/2 PING frames to verify that the connection is still healthy. If the remote peer + * does not respond within the configured timeout, the connection will be aborted. + */ + 'connection_keepalive': (_envoy_config_core_v3_KeepaliveSettings__Output | null); + /** + * [#not-implemented-hide:] Hiding so that the field can be removed after oghttp2 is rolled out. + * If set, force use of a particular HTTP/2 codec: oghttp2 if true, nghttp2 if false. + * If unset, HTTP/2 codec is selected based on envoy.reloadable_features.http2_use_oghttp2. + */ + 'use_oghttp2_codec': (_google_protobuf_BoolValue__Output | null); } diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/Http3ProtocolOptions.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/Http3ProtocolOptions.ts new file mode 100644 index 000000000..51b31b8e7 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/Http3ProtocolOptions.ts @@ -0,0 +1,56 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/protocol.proto + +import type { QuicProtocolOptions as _envoy_config_core_v3_QuicProtocolOptions, QuicProtocolOptions__Output as _envoy_config_core_v3_QuicProtocolOptions__Output } from '../../../../envoy/config/core/v3/QuicProtocolOptions'; +import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../../google/protobuf/BoolValue'; + +/** + * A message which allows using HTTP/3. + * [#next-free-field: 6] + */ +export interface Http3ProtocolOptions { + 'quic_protocol_options'?: (_envoy_config_core_v3_QuicProtocolOptions | null); + /** + * Allows invalid HTTP messaging and headers. When this option is disabled (default), then + * the whole HTTP/3 connection is terminated upon receiving invalid HEADERS frame. However, + * when this option is enabled, only the offending stream is terminated. + * + * If set, this overrides any HCM :ref:`stream_error_on_invalid_http_messaging + * `. + */ + 'override_stream_error_on_invalid_http_message'?: (_google_protobuf_BoolValue | null); + /** + * Allows proxying Websocket and other upgrades over HTTP/3 CONNECT using + * the header mechanisms from the `HTTP/2 extended connect RFC + * `_ + * and settings `proposed for HTTP/3 + * `_ + * Note that HTTP/3 CONNECT is not yet an RFC. + */ + 'allow_extended_connect'?: (boolean); +} + +/** + * A message which allows using HTTP/3. 
+ * [#next-free-field: 6] + */ +export interface Http3ProtocolOptions__Output { + 'quic_protocol_options': (_envoy_config_core_v3_QuicProtocolOptions__Output | null); + /** + * Allows invalid HTTP messaging and headers. When this option is disabled (default), then + * the whole HTTP/3 connection is terminated upon receiving invalid HEADERS frame. However, + * when this option is enabled, only the offending stream is terminated. + * + * If set, this overrides any HCM :ref:`stream_error_on_invalid_http_messaging + * `. + */ + 'override_stream_error_on_invalid_http_message': (_google_protobuf_BoolValue__Output | null); + /** + * Allows proxying Websocket and other upgrades over HTTP/3 CONNECT using + * the header mechanisms from the `HTTP/2 extended connect RFC + * `_ + * and settings `proposed for HTTP/3 + * `_ + * Note that HTTP/3 CONNECT is not yet an RFC. + */ + 'allow_extended_connect': (boolean); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/HttpProtocolOptions.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/HttpProtocolOptions.ts new file mode 100644 index 000000000..dfa800c3b --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/HttpProtocolOptions.ts @@ -0,0 +1,189 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/protocol.proto + +import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../../google/protobuf/Duration'; +import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; + +// Original file: deps/envoy-api/envoy/config/core/v3/protocol.proto + +/** + * Action to take when Envoy receives client request with header names containing underscore + * characters. + * Underscore character is allowed in header names by the RFC-7230 and this behavior is implemented + * as a security measure due to systems that treat '_' and '-' as interchangeable. Envoy by default allows client request headers with underscore + * characters. + */ +export const _envoy_config_core_v3_HttpProtocolOptions_HeadersWithUnderscoresAction = { + /** + * Allow headers with underscores. This is the default behavior. + */ + ALLOW: 'ALLOW', + /** + * Reject client request. HTTP/1 requests are rejected with the 400 status. HTTP/2 requests + * end with the stream reset. The "httpN.requests_rejected_with_underscores_in_headers" counter + * is incremented for each rejected request. + */ + REJECT_REQUEST: 'REJECT_REQUEST', + /** + * Drop the client header with name containing underscores. The header is dropped before the filter chain is + * invoked and as such filters will not see dropped headers. The + * "httpN.dropped_headers_with_underscores" is incremented for each dropped header. + */ + DROP_HEADER: 'DROP_HEADER', +} as const; + +/** + * Action to take when Envoy receives client request with header names containing underscore + * characters. + * Underscore character is allowed in header names by the RFC-7230 and this behavior is implemented + * as a security measure due to systems that treat '_' and '-' as interchangeable. Envoy by default allows client request headers with underscore + * characters. + */ +export type _envoy_config_core_v3_HttpProtocolOptions_HeadersWithUnderscoresAction = + /** + * Allow headers with underscores. This is the default behavior. + */ + | 'ALLOW' + | 0 + /** + * Reject client request. HTTP/1 requests are rejected with the 400 status. 
HTTP/2 requests + * end with the stream reset. The "httpN.requests_rejected_with_underscores_in_headers" counter + * is incremented for each rejected request. + */ + | 'REJECT_REQUEST' + | 1 + /** + * Drop the client header with name containing underscores. The header is dropped before the filter chain is + * invoked and as such filters will not see dropped headers. The + * "httpN.dropped_headers_with_underscores" is incremented for each dropped header. + */ + | 'DROP_HEADER' + | 2 + +/** + * Action to take when Envoy receives client request with header names containing underscore + * characters. + * Underscore character is allowed in header names by the RFC-7230 and this behavior is implemented + * as a security measure due to systems that treat '_' and '-' as interchangeable. Envoy by default allows client request headers with underscore + * characters. + */ +export type _envoy_config_core_v3_HttpProtocolOptions_HeadersWithUnderscoresAction__Output = typeof _envoy_config_core_v3_HttpProtocolOptions_HeadersWithUnderscoresAction[keyof typeof _envoy_config_core_v3_HttpProtocolOptions_HeadersWithUnderscoresAction] + +/** + * [#next-free-field: 7] + */ +export interface HttpProtocolOptions { + /** + * The idle timeout for connections. The idle timeout is defined as the + * period in which there are no active requests. When the + * idle timeout is reached the connection will be closed. If the connection is an HTTP/2 + * downstream connection a drain sequence will occur prior to closing the connection, see + * :ref:`drain_timeout + * `. + * Note that request based timeouts mean that HTTP/2 PINGs will not keep the connection alive. + * If not specified, this defaults to 1 hour. To disable idle timeouts explicitly set this to 0. + * + * .. warning:: + * Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP + * FIN packets, etc. + * + * If the :ref:`overload action ` "envoy.overload_actions.reduce_timeouts" + * is configured, this timeout is scaled for downstream connections according to the value for + * :ref:`HTTP_DOWNSTREAM_CONNECTION_IDLE `. + */ + 'idle_timeout'?: (_google_protobuf_Duration | null); + /** + * The maximum number of headers. If unconfigured, the default + * maximum number of request headers allowed is 100. Requests that exceed this limit will receive + * a 431 response for HTTP/1.x and cause a stream reset for HTTP/2. + */ + 'max_headers_count'?: (_google_protobuf_UInt32Value | null); + /** + * The maximum duration of a connection. The duration is defined as a period since a connection + * was established. If not set, there is no max duration. When max_connection_duration is reached + * and if there are no active streams, the connection will be closed. If the connection is a + * downstream connection and there are any active streams, the drain sequence will kick-in, + * and the connection will be force-closed after the drain period. See :ref:`drain_timeout + * `. + */ + 'max_connection_duration'?: (_google_protobuf_Duration | null); + /** + * Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be + * reset independent of any other timeouts. If not specified, this value is not set. + */ + 'max_stream_duration'?: (_google_protobuf_Duration | null); + /** + * Action to take when a client request with a header name containing underscore characters is received. + * If this setting is not specified, the value defaults to ALLOW. + * Note: upstream responses are not affected by this setting. 
+ * Note: this only affects client headers. It does not affect headers added + * by Envoy filters and does not have any impact if added to cluster config. + */ + 'headers_with_underscores_action'?: (_envoy_config_core_v3_HttpProtocolOptions_HeadersWithUnderscoresAction); + /** + * Optional maximum requests for both upstream and downstream connections. + * If not specified, there is no limit. + * Setting this parameter to 1 will effectively disable keep alive. + * For HTTP/2 and HTTP/3, due to concurrent stream processing, the limit is approximate. + */ + 'max_requests_per_connection'?: (_google_protobuf_UInt32Value | null); +} + +/** + * [#next-free-field: 7] + */ +export interface HttpProtocolOptions__Output { + /** + * The idle timeout for connections. The idle timeout is defined as the + * period in which there are no active requests. When the + * idle timeout is reached the connection will be closed. If the connection is an HTTP/2 + * downstream connection a drain sequence will occur prior to closing the connection, see + * :ref:`drain_timeout + * `. + * Note that request based timeouts mean that HTTP/2 PINGs will not keep the connection alive. + * If not specified, this defaults to 1 hour. To disable idle timeouts explicitly set this to 0. + * + * .. warning:: + * Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP + * FIN packets, etc. + * + * If the :ref:`overload action ` "envoy.overload_actions.reduce_timeouts" + * is configured, this timeout is scaled for downstream connections according to the value for + * :ref:`HTTP_DOWNSTREAM_CONNECTION_IDLE `. + */ + 'idle_timeout': (_google_protobuf_Duration__Output | null); + /** + * The maximum number of headers. If unconfigured, the default + * maximum number of request headers allowed is 100. Requests that exceed this limit will receive + * a 431 response for HTTP/1.x and cause a stream reset for HTTP/2. + */ + 'max_headers_count': (_google_protobuf_UInt32Value__Output | null); + /** + * The maximum duration of a connection. The duration is defined as a period since a connection + * was established. If not set, there is no max duration. When max_connection_duration is reached + * and if there are no active streams, the connection will be closed. If the connection is a + * downstream connection and there are any active streams, the drain sequence will kick-in, + * and the connection will be force-closed after the drain period. See :ref:`drain_timeout + * `. + */ + 'max_connection_duration': (_google_protobuf_Duration__Output | null); + /** + * Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be + * reset independent of any other timeouts. If not specified, this value is not set. + */ + 'max_stream_duration': (_google_protobuf_Duration__Output | null); + /** + * Action to take when a client request with a header name containing underscore characters is received. + * If this setting is not specified, the value defaults to ALLOW. + * Note: upstream responses are not affected by this setting. + * Note: this only affects client headers. It does not affect headers added + * by Envoy filters and does not have any impact if added to cluster config. + */ + 'headers_with_underscores_action': (_envoy_config_core_v3_HttpProtocolOptions_HeadersWithUnderscoresAction__Output); + /** + * Optional maximum requests for both upstream and downstream connections. + * If not specified, there is no limit. 
+ * Setting this parameter to 1 will effectively disable keep alive. + * For HTTP/2 and HTTP/3, due to concurrent stream processing, the limit is approximate. + */ + 'max_requests_per_connection': (_google_protobuf_UInt32Value__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/HttpUri.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/HttpUri.ts similarity index 85% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/core/HttpUri.ts rename to packages/grpc-js-xds/src/generated/envoy/config/core/v3/HttpUri.ts index 19711dde7..9a06ba477 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/HttpUri.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/HttpUri.ts @@ -1,4 +1,4 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/http_uri.proto +// Original file: deps/envoy-api/envoy/config/core/v3/http_uri.proto import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../../google/protobuf/Duration'; @@ -30,9 +30,9 @@ export interface HttpUri { /** * Sets the maximum duration in milliseconds that a response can take to arrive upon request. */ - 'timeout'?: (_google_protobuf_Duration); + 'timeout'?: (_google_protobuf_Duration | null); /** - * Specify how `uri` is to be fetched. Today, this requires an explicit + * Specify how ``uri`` is to be fetched. Today, this requires an explicit * cluster, but in the future we may support dynamic cluster creation or * inline DNS resolution. See `issue * `_. @@ -68,9 +68,9 @@ export interface HttpUri__Output { /** * Sets the maximum duration in milliseconds that a response can take to arrive upon request. */ - 'timeout'?: (_google_protobuf_Duration__Output); + 'timeout': (_google_protobuf_Duration__Output | null); /** - * Specify how `uri` is to be fetched. Today, this requires an explicit + * Specify how ``uri`` is to be fetched. Today, this requires an explicit * cluster, but in the future we may support dynamic cluster creation or * inline DNS resolution. See `issue * `_. diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/KeepaliveSettings.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/KeepaliveSettings.ts new file mode 100644 index 000000000..2d2ef0787 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/KeepaliveSettings.ts @@ -0,0 +1,66 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/protocol.proto + +import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../../google/protobuf/Duration'; +import type { Percent as _envoy_type_v3_Percent, Percent__Output as _envoy_type_v3_Percent__Output } from '../../../../envoy/type/v3/Percent'; + +export interface KeepaliveSettings { + /** + * Send HTTP/2 PING frames at this period, in order to test that the connection is still alive. + * If this is zero, interval PINGs will not be sent. + */ + 'interval'?: (_google_protobuf_Duration | null); + /** + * How long to wait for a response to a keepalive PING. If a response is not received within this + * time period, the connection will be aborted. Note that in order to prevent the influence of + * Head-of-line (HOL) blocking the timeout period is extended when *any* frame is received on + * the connection, under the assumption that if a frame is received the connection is healthy. 
+ */ + 'timeout'?: (_google_protobuf_Duration | null); + /** + * A random jitter amount as a percentage of interval that will be added to each interval. + * A value of zero means there will be no jitter. + * The default value is 15%. + */ + 'interval_jitter'?: (_envoy_type_v3_Percent | null); + /** + * If the connection has been idle for this duration, send a HTTP/2 ping ahead + * of new stream creation, to quickly detect dead connections. + * If this is zero, this type of PING will not be sent. + * If an interval ping is outstanding, a second ping will not be sent as the + * interval ping will determine if the connection is dead. + * + * The same feature for HTTP/3 is given by inheritance from QUICHE which uses :ref:`connection idle_timeout ` and the current PTO of the connection to decide whether to probe before sending a new request. + */ + 'connection_idle_interval'?: (_google_protobuf_Duration | null); +} + +export interface KeepaliveSettings__Output { + /** + * Send HTTP/2 PING frames at this period, in order to test that the connection is still alive. + * If this is zero, interval PINGs will not be sent. + */ + 'interval': (_google_protobuf_Duration__Output | null); + /** + * How long to wait for a response to a keepalive PING. If a response is not received within this + * time period, the connection will be aborted. Note that in order to prevent the influence of + * Head-of-line (HOL) blocking the timeout period is extended when *any* frame is received on + * the connection, under the assumption that if a frame is received the connection is healthy. + */ + 'timeout': (_google_protobuf_Duration__Output | null); + /** + * A random jitter amount as a percentage of interval that will be added to each interval. + * A value of zero means there will be no jitter. + * The default value is 15%. + */ + 'interval_jitter': (_envoy_type_v3_Percent__Output | null); + /** + * If the connection has been idle for this duration, send a HTTP/2 ping ahead + * of new stream creation, to quickly detect dead connections. + * If this is zero, this type of PING will not be sent. + * If an interval ping is outstanding, a second ping will not be sent as the + * interval ping will determine if the connection is dead. + * + * The same feature for HTTP/3 is given by inheritance from QUICHE which uses :ref:`connection idle_timeout ` and the current PTO of the connection to decide whether to probe before sending a new request. + */ + 'connection_idle_interval': (_google_protobuf_Duration__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/Locality.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/Locality.ts similarity index 79% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/core/Locality.ts rename to packages/grpc-js-xds/src/generated/envoy/config/core/v3/Locality.ts index 49fb232a4..b15b53832 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/Locality.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/Locality.ts @@ -1,4 +1,4 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/base.proto +// Original file: deps/envoy-api/envoy/config/core/v3/base.proto /** @@ -6,13 +6,13 @@ */ export interface Locality { /** - * Region this :ref:`zone ` belongs to. + * Region this :ref:`zone ` belongs to. */ 'region'?: (string); /** * Defines the local service zone where Envoy is running. 
Though optional, it * should be set if discovery service routing is used and the discovery - * service exposes :ref:`zone data `, + * service exposes :ref:`zone data `, * either in this message or via :option:`--service-zone`. The meaning of zone * is context dependent, e.g. `Availability Zone (AZ) * `_ @@ -33,13 +33,13 @@ export interface Locality { */ export interface Locality__Output { /** - * Region this :ref:`zone ` belongs to. + * Region this :ref:`zone ` belongs to. */ 'region': (string); /** * Defines the local service zone where Envoy is running. Though optional, it * should be set if discovery service routing is used and the discovery - * service exposes :ref:`zone data `, + * service exposes :ref:`zone data `, * either in this message or via :option:`--service-zone`. The meaning of zone * is context dependent, e.g. `Availability Zone (AZ) * `_ diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/Metadata.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/Metadata.ts similarity index 61% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/core/Metadata.ts rename to packages/grpc-js-xds/src/generated/envoy/config/core/v3/Metadata.ts index ca823a27c..2bcd3ce36 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/Metadata.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/Metadata.ts @@ -1,6 +1,7 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/base.proto +// Original file: deps/envoy-api/envoy/config/core/v3/base.proto import type { Struct as _google_protobuf_Struct, Struct__Output as _google_protobuf_Struct__Output } from '../../../../google/protobuf/Struct'; +import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; /** * Metadata provides additional inputs to filters based on matched listeners, @@ -28,10 +29,23 @@ import type { Struct as _google_protobuf_Struct, Struct__Output as _google_proto */ export interface Metadata { /** - * Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.* + * Key is the reverse DNS filter name, e.g. com.acme.widget. The ``envoy.*`` * namespace is reserved for Envoy's built-in filters. + * If both ``filter_metadata`` and + * :ref:`typed_filter_metadata ` + * fields are present in the metadata with same keys, + * only ``typed_filter_metadata`` field will be parsed. */ 'filter_metadata'?: ({[key: string]: _google_protobuf_Struct}); + /** + * Key is the reverse DNS filter name, e.g. com.acme.widget. The ``envoy.*`` + * namespace is reserved for Envoy's built-in filters. + * The value is encoded as google.protobuf.Any. + * If both :ref:`filter_metadata ` + * and ``typed_filter_metadata`` fields are present in the metadata with same keys, + * only ``typed_filter_metadata`` field will be parsed. + */ + 'typed_filter_metadata'?: ({[key: string]: _google_protobuf_Any}); } /** @@ -60,8 +74,21 @@ export interface Metadata { */ export interface Metadata__Output { /** - * Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.* + * Key is the reverse DNS filter name, e.g. com.acme.widget. The ``envoy.*`` + * namespace is reserved for Envoy's built-in filters. + * If both ``filter_metadata`` and + * :ref:`typed_filter_metadata ` + * fields are present in the metadata with same keys, + * only ``typed_filter_metadata`` field will be parsed. + */ + 'filter_metadata': ({[key: string]: _google_protobuf_Struct__Output}); + /** + * Key is the reverse DNS filter name, e.g. com.acme.widget. 
The ``envoy.*`` * namespace is reserved for Envoy's built-in filters. + * The value is encoded as google.protobuf.Any. + * If both :ref:`filter_metadata ` + * and ``typed_filter_metadata`` fields are present in the metadata with same keys, + * only ``typed_filter_metadata`` field will be parsed. */ - 'filter_metadata'?: ({[key: string]: _google_protobuf_Struct__Output}); + 'typed_filter_metadata': ({[key: string]: _google_protobuf_Any__Output}); } diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/Node.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/Node.ts similarity index 58% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/core/Node.ts rename to packages/grpc-js-xds/src/generated/envoy/config/core/v3/Node.ts index c6ddea9d4..b29b68502 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/Node.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/Node.ts @@ -1,16 +1,17 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/base.proto +// Original file: deps/envoy-api/envoy/config/core/v3/base.proto import type { Struct as _google_protobuf_Struct, Struct__Output as _google_protobuf_Struct__Output } from '../../../../google/protobuf/Struct'; -import type { Locality as _envoy_api_v2_core_Locality, Locality__Output as _envoy_api_v2_core_Locality__Output } from '../../../../envoy/api/v2/core/Locality'; -import type { BuildVersion as _envoy_api_v2_core_BuildVersion, BuildVersion__Output as _envoy_api_v2_core_BuildVersion__Output } from '../../../../envoy/api/v2/core/BuildVersion'; -import type { Extension as _envoy_api_v2_core_Extension, Extension__Output as _envoy_api_v2_core_Extension__Output } from '../../../../envoy/api/v2/core/Extension'; -import type { Address as _envoy_api_v2_core_Address, Address__Output as _envoy_api_v2_core_Address__Output } from '../../../../envoy/api/v2/core/Address'; +import type { Locality as _envoy_config_core_v3_Locality, Locality__Output as _envoy_config_core_v3_Locality__Output } from '../../../../envoy/config/core/v3/Locality'; +import type { BuildVersion as _envoy_config_core_v3_BuildVersion, BuildVersion__Output as _envoy_config_core_v3_BuildVersion__Output } from '../../../../envoy/config/core/v3/BuildVersion'; +import type { Extension as _envoy_config_core_v3_Extension, Extension__Output as _envoy_config_core_v3_Extension__Output } from '../../../../envoy/config/core/v3/Extension'; +import type { Address as _envoy_config_core_v3_Address, Address__Output as _envoy_config_core_v3_Address__Output } from '../../../../envoy/config/core/v3/Address'; +import type { ContextParams as _xds_core_v3_ContextParams, ContextParams__Output as _xds_core_v3_ContextParams__Output } from '../../../../xds/core/v3/ContextParams'; /** * Identifies a specific Envoy instance. The node identifier is presented to the * management server, which may use this identifier to distinguish per Envoy * configuration for serving. - * [#next-free-field: 12] + * [#next-free-field: 13] */ export interface Node { /** @@ -27,10 +28,10 @@ export interface Node { * optional, it should be set if any of the following features are used: * :ref:`statsd `, :ref:`health check cluster * verification - * `, - * :ref:`runtime override directory `, + * `, + * :ref:`runtime override directory `, * :ref:`user agent addition - * `, + * `, * :ref:`HTTP global rate limiting `, * :ref:`CDS `, and :ref:`HTTP tracing * `, either in this message or via @@ -41,18 +42,11 @@ export interface Node { * Opaque metadata extending the node identifier. 
Envoy will pass this * directly to the management server. */ - 'metadata'?: (_google_protobuf_Struct); + 'metadata'?: (_google_protobuf_Struct | null); /** * Locality specifying where the Envoy instance is running. */ - 'locality'?: (_envoy_api_v2_core_Locality); - /** - * This is motivated by informing a management server during canary which - * version of Envoy is being tested in a heterogeneous fleet. This will be set - * by Envoy in management server RPCs. - * This field is deprecated in favor of the user_agent_name and user_agent_version values. - */ - 'build_version'?: (string); + 'locality'?: (_envoy_config_core_v3_Locality | null); /** * Free-form string that identifies the entity requesting config. * E.g. "envoy" or "grpc" @@ -66,15 +60,15 @@ export interface Node { /** * Structured version of the entity requesting config. */ - 'user_agent_build_version'?: (_envoy_api_v2_core_BuildVersion); + 'user_agent_build_version'?: (_envoy_config_core_v3_BuildVersion | null); /** * List of extensions and their versions supported by the node. */ - 'extensions'?: (_envoy_api_v2_core_Extension)[]; + 'extensions'?: (_envoy_config_core_v3_Extension)[]; /** * Client feature support list. These are well known features described * in the Envoy API repository for a given major version of an API. Client features - * use reverse DNS naming scheme, for example `com.acme.feature`. + * use reverse DNS naming scheme, for example ``com.acme.feature``. * See :ref:`the list of features ` that xDS client may * support. */ @@ -83,9 +77,18 @@ export interface Node { * Known listening ports on the node as a generic hint to the management server * for filtering :ref:`listeners ` to be returned. For example, * if there is a listener bound to port 80, the list can optionally contain the - * SocketAddress `(0.0.0.0,80)`. The field is optional and just a hint. + * SocketAddress ``(0.0.0.0,80)``. The field is optional and just a hint. + * @deprecated + */ + 'listening_addresses'?: (_envoy_config_core_v3_Address)[]; + /** + * Map from xDS resource type URL to dynamic context parameters. These may vary at runtime (unlike + * other fields in this message). For example, the xDS client may have a shard identifier that + * changes during the lifetime of the xDS client. In Envoy, this would be achieved by updating the + * dynamic context on the Server::Instance's LocalInfo context provider. The shard ID dynamic + * parameter then appears in this field during future discovery requests. */ - 'listening_addresses'?: (_envoy_api_v2_core_Address)[]; + 'dynamic_parameters'?: ({[key: string]: _xds_core_v3_ContextParams}); 'user_agent_version_type'?: "user_agent_version"|"user_agent_build_version"; } @@ -93,7 +96,7 @@ export interface Node { * Identifies a specific Envoy instance. The node identifier is presented to the * management server, which may use this identifier to distinguish per Envoy * configuration for serving. 
- * [#next-free-field: 12] + * [#next-free-field: 13] */ export interface Node__Output { /** @@ -110,10 +113,10 @@ export interface Node__Output { * optional, it should be set if any of the following features are used: * :ref:`statsd `, :ref:`health check cluster * verification - * `, - * :ref:`runtime override directory `, + * `, + * :ref:`runtime override directory `, * :ref:`user agent addition - * `, + * `, * :ref:`HTTP global rate limiting `, * :ref:`CDS `, and :ref:`HTTP tracing * `, either in this message or via @@ -124,18 +127,11 @@ export interface Node__Output { * Opaque metadata extending the node identifier. Envoy will pass this * directly to the management server. */ - 'metadata'?: (_google_protobuf_Struct__Output); + 'metadata': (_google_protobuf_Struct__Output | null); /** * Locality specifying where the Envoy instance is running. */ - 'locality'?: (_envoy_api_v2_core_Locality__Output); - /** - * This is motivated by informing a management server during canary which - * version of Envoy is being tested in a heterogeneous fleet. This will be set - * by Envoy in management server RPCs. - * This field is deprecated in favor of the user_agent_name and user_agent_version values. - */ - 'build_version': (string); + 'locality': (_envoy_config_core_v3_Locality__Output | null); /** * Free-form string that identifies the entity requesting config. * E.g. "envoy" or "grpc" @@ -149,15 +145,15 @@ export interface Node__Output { /** * Structured version of the entity requesting config. */ - 'user_agent_build_version'?: (_envoy_api_v2_core_BuildVersion__Output); + 'user_agent_build_version'?: (_envoy_config_core_v3_BuildVersion__Output | null); /** * List of extensions and their versions supported by the node. */ - 'extensions': (_envoy_api_v2_core_Extension__Output)[]; + 'extensions': (_envoy_config_core_v3_Extension__Output)[]; /** * Client feature support list. These are well known features described * in the Envoy API repository for a given major version of an API. Client features - * use reverse DNS naming scheme, for example `com.acme.feature`. + * use reverse DNS naming scheme, for example ``com.acme.feature``. * See :ref:`the list of features ` that xDS client may * support. */ @@ -166,8 +162,17 @@ export interface Node__Output { * Known listening ports on the node as a generic hint to the management server * for filtering :ref:`listeners ` to be returned. For example, * if there is a listener bound to port 80, the list can optionally contain the - * SocketAddress `(0.0.0.0,80)`. The field is optional and just a hint. + * SocketAddress ``(0.0.0.0,80)``. The field is optional and just a hint. + * @deprecated + */ + 'listening_addresses': (_envoy_config_core_v3_Address__Output)[]; + /** + * Map from xDS resource type URL to dynamic context parameters. These may vary at runtime (unlike + * other fields in this message). For example, the xDS client may have a shard identifier that + * changes during the lifetime of the xDS client. In Envoy, this would be achieved by updating the + * dynamic context on the Server::Instance's LocalInfo context provider. The shard ID dynamic + * parameter then appears in this field during future discovery requests. 
*/ - 'listening_addresses': (_envoy_api_v2_core_Address__Output)[]; + 'dynamic_parameters': ({[key: string]: _xds_core_v3_ContextParams__Output}); 'user_agent_version_type': "user_agent_version"|"user_agent_build_version"; } diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/PathConfigSource.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/PathConfigSource.ts new file mode 100644 index 000000000..e620f8c52 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/PathConfigSource.ts @@ -0,0 +1,85 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/config_source.proto + +import type { WatchedDirectory as _envoy_config_core_v3_WatchedDirectory, WatchedDirectory__Output as _envoy_config_core_v3_WatchedDirectory__Output } from '../../../../envoy/config/core/v3/WatchedDirectory'; + +/** + * Local filesystem path configuration source. + */ +export interface PathConfigSource { + /** + * Path on the filesystem to source and watch for configuration updates. + * When sourcing configuration for a :ref:`secret `, + * the certificate and key files are also watched for updates. + * + * .. note:: + * + * The path to the source must exist at config load time. + * + * .. note:: + * + * If ``watched_directory`` is *not* configured, Envoy will watch the file path for *moves*. + * This is because in general only moves are atomic. The same method of swapping files as is + * demonstrated in the :ref:`runtime documentation ` can be + * used here also. If ``watched_directory`` is configured, no watch will be placed directly on + * this path. Instead, the configured ``watched_directory`` will be used to trigger reloads of + * this path. This is required in certain deployment scenarios. See below for more information. + */ + 'path'?: (string); + /** + * If configured, this directory will be watched for *moves*. When an entry in this directory is + * moved to, the ``path`` will be reloaded. This is required in certain deployment scenarios. + * + * Specifically, if trying to load an xDS resource using a + * `Kubernetes ConfigMap `_, the + * following configuration might be used: + * 1. Store xds.yaml inside a ConfigMap. + * 2. Mount the ConfigMap to ``/config_map/xds`` + * 3. Configure path ``/config_map/xds/xds.yaml`` + * 4. Configure watched directory ``/config_map/xds`` + * + * The above configuration will ensure that Envoy watches the owning directory for moves which is + * required due to how Kubernetes manages ConfigMap symbolic links during atomic updates. + */ + 'watched_directory'?: (_envoy_config_core_v3_WatchedDirectory | null); +} + +/** + * Local filesystem path configuration source. + */ +export interface PathConfigSource__Output { + /** + * Path on the filesystem to source and watch for configuration updates. + * When sourcing configuration for a :ref:`secret `, + * the certificate and key files are also watched for updates. + * + * .. note:: + * + * The path to the source must exist at config load time. + * + * .. note:: + * + * If ``watched_directory`` is *not* configured, Envoy will watch the file path for *moves*. + * This is because in general only moves are atomic. The same method of swapping files as is + * demonstrated in the :ref:`runtime documentation ` can be + * used here also. If ``watched_directory`` is configured, no watch will be placed directly on + * this path. Instead, the configured ``watched_directory`` will be used to trigger reloads of + * this path. This is required in certain deployment scenarios. 
See below for more information. + */ + 'path': (string); + /** + * If configured, this directory will be watched for *moves*. When an entry in this directory is + * moved to, the ``path`` will be reloaded. This is required in certain deployment scenarios. + * + * Specifically, if trying to load an xDS resource using a + * `Kubernetes ConfigMap `_, the + * following configuration might be used: + * 1. Store xds.yaml inside a ConfigMap. + * 2. Mount the ConfigMap to ``/config_map/xds`` + * 3. Configure path ``/config_map/xds/xds.yaml`` + * 4. Configure watched directory ``/config_map/xds`` + * + * The above configuration will ensure that Envoy watches the owning directory for moves which is + * required due to how Kubernetes manages ConfigMap symbolic links during atomic updates. + */ + 'watched_directory': (_envoy_config_core_v3_WatchedDirectory__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/Pipe.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/Pipe.ts similarity index 92% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/core/Pipe.ts rename to packages/grpc-js-xds/src/generated/envoy/config/core/v3/Pipe.ts index 9e6cbb82d..3d8fdb1c8 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/Pipe.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/Pipe.ts @@ -1,4 +1,4 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/address.proto +// Original file: deps/envoy-api/envoy/config/core/v3/address.proto export interface Pipe { diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/ProxyProtocolConfig.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/ProxyProtocolConfig.ts new file mode 100644 index 000000000..34cf7475f --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/ProxyProtocolConfig.ts @@ -0,0 +1,54 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/proxy_protocol.proto + +import type { ProxyProtocolPassThroughTLVs as _envoy_config_core_v3_ProxyProtocolPassThroughTLVs, ProxyProtocolPassThroughTLVs__Output as _envoy_config_core_v3_ProxyProtocolPassThroughTLVs__Output } from '../../../../envoy/config/core/v3/ProxyProtocolPassThroughTLVs'; + +// Original file: deps/envoy-api/envoy/config/core/v3/proxy_protocol.proto + +export const _envoy_config_core_v3_ProxyProtocolConfig_Version = { + /** + * PROXY protocol version 1. Human readable format. + */ + V1: 'V1', + /** + * PROXY protocol version 2. Binary format. + */ + V2: 'V2', +} as const; + +export type _envoy_config_core_v3_ProxyProtocolConfig_Version = + /** + * PROXY protocol version 1. Human readable format. + */ + | 'V1' + | 0 + /** + * PROXY protocol version 2. Binary format. + */ + | 'V2' + | 1 + +export type _envoy_config_core_v3_ProxyProtocolConfig_Version__Output = typeof _envoy_config_core_v3_ProxyProtocolConfig_Version[keyof typeof _envoy_config_core_v3_ProxyProtocolConfig_Version] + +export interface ProxyProtocolConfig { + /** + * The PROXY protocol version to use. See https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt for details + */ + 'version'?: (_envoy_config_core_v3_ProxyProtocolConfig_Version); + /** + * This config controls which TLVs can be passed to upstream if it is Proxy Protocol + * V2 header. If there is no setting for this field, no TLVs will be passed through. + */ + 'pass_through_tlvs'?: (_envoy_config_core_v3_ProxyProtocolPassThroughTLVs | null); +} + +export interface ProxyProtocolConfig__Output { + /** + * The PROXY protocol version to use. 
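// Hypothetical usage sketch: a ProxyProtocolConfig sending the binary v2 header and
// passing through only one TLV type. The import path and the 0x05 TLV value are
// arbitrary example choices, not taken from this diff.
import type { ProxyProtocolConfig } from '../generated/envoy/config/core/v3/ProxyProtocolConfig';

const proxyProtocol: ProxyProtocolConfig = {
  version: 'V2',             // binary PROXY protocol format
  pass_through_tlvs: {
    match_type: 'INCLUDE',   // only pass the TLV types listed in tlv_type
    tlv_type: [0x05],
  },
};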
See https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt for details + */ + 'version': (_envoy_config_core_v3_ProxyProtocolConfig_Version__Output); + /** + * This config controls which TLVs can be passed to upstream if it is Proxy Protocol + * V2 header. If there is no setting for this field, no TLVs will be passed through. + */ + 'pass_through_tlvs': (_envoy_config_core_v3_ProxyProtocolPassThroughTLVs__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/ProxyProtocolPassThroughTLVs.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/ProxyProtocolPassThroughTLVs.ts new file mode 100644 index 000000000..9f253ceff --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/ProxyProtocolPassThroughTLVs.ts @@ -0,0 +1,57 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/proxy_protocol.proto + + +// Original file: deps/envoy-api/envoy/config/core/v3/proxy_protocol.proto + +export const _envoy_config_core_v3_ProxyProtocolPassThroughTLVs_PassTLVsMatchType = { + /** + * Pass all TLVs. + */ + INCLUDE_ALL: 'INCLUDE_ALL', + /** + * Pass specific TLVs defined in tlv_type. + */ + INCLUDE: 'INCLUDE', +} as const; + +export type _envoy_config_core_v3_ProxyProtocolPassThroughTLVs_PassTLVsMatchType = + /** + * Pass all TLVs. + */ + | 'INCLUDE_ALL' + | 0 + /** + * Pass specific TLVs defined in tlv_type. + */ + | 'INCLUDE' + | 1 + +export type _envoy_config_core_v3_ProxyProtocolPassThroughTLVs_PassTLVsMatchType__Output = typeof _envoy_config_core_v3_ProxyProtocolPassThroughTLVs_PassTLVsMatchType[keyof typeof _envoy_config_core_v3_ProxyProtocolPassThroughTLVs_PassTLVsMatchType] + +export interface ProxyProtocolPassThroughTLVs { + /** + * The strategy to pass through TLVs. Default is INCLUDE_ALL. + * If INCLUDE_ALL is set, all TLVs will be passed through no matter the tlv_type field. + */ + 'match_type'?: (_envoy_config_core_v3_ProxyProtocolPassThroughTLVs_PassTLVsMatchType); + /** + * The TLV types that are applied based on match_type. + * TLV type is defined as uint8_t in proxy protocol. See `the spec + * `_ for details. + */ + 'tlv_type'?: (number)[]; +} + +export interface ProxyProtocolPassThroughTLVs__Output { + /** + * The strategy to pass through TLVs. Default is INCLUDE_ALL. + * If INCLUDE_ALL is set, all TLVs will be passed through no matter the tlv_type field. + */ + 'match_type': (_envoy_config_core_v3_ProxyProtocolPassThroughTLVs_PassTLVsMatchType__Output); + /** + * The TLV types that are applied based on match_type. + * TLV type is defined as uint8_t in proxy protocol. See `the spec + * `_ for details. + */ + 'tlv_type': (number)[]; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/QueryParameter.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/QueryParameter.ts new file mode 100644 index 000000000..4cf7952fb --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/QueryParameter.ts @@ -0,0 +1,30 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/base.proto + + +/** + * Query parameter name/value pair. + */ +export interface QueryParameter { + /** + * The key of the query parameter. Case sensitive. + */ + 'key'?: (string); + /** + * The value of the query parameter. + */ + 'value'?: (string); +} + +/** + * Query parameter name/value pair. + */ +export interface QueryParameter__Output { + /** + * The key of the query parameter. Case sensitive. + */ + 'key': (string); + /** + * The value of the query parameter. 
+ */ + 'value': (string); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/QuicKeepAliveSettings.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/QuicKeepAliveSettings.ts new file mode 100644 index 000000000..2bb2aa872 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/QuicKeepAliveSettings.ts @@ -0,0 +1,53 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/protocol.proto + +import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../../google/protobuf/Duration'; + +/** + * Config for keepalive probes in a QUIC connection. + * Note that QUIC keep-alive probing packets work differently from HTTP/2 keep-alive PINGs in a sense that the probing packet + * itself doesn't timeout waiting for a probing response. Quic has a shorter idle timeout than TCP, so it doesn't rely on such probing to discover dead connections. If the peer fails to respond, the connection will idle timeout eventually. Thus, they are configured differently from :ref:`connection_keepalive `. + */ +export interface QuicKeepAliveSettings { + /** + * The max interval for a connection to send keep-alive probing packets (with PING or PATH_RESPONSE). The value should be smaller than :ref:`connection idle_timeout ` to prevent idle timeout while not less than 1s to avoid throttling the connection or flooding the peer with probes. + * + * If :ref:`initial_interval ` is absent or zero, a client connection will use this value to start probing. + * + * If zero, disable keepalive probing. + * If absent, use the QUICHE default interval to probe. + */ + 'max_interval'?: (_google_protobuf_Duration | null); + /** + * The interval to send the first few keep-alive probing packets to prevent connection from hitting the idle timeout. Subsequent probes will be sent, each one with an interval exponentially longer than previous one, till it reaches :ref:`max_interval `. And the probes afterwards will always use :ref:`max_interval `. + * + * The value should be smaller than :ref:`connection idle_timeout ` to prevent idle timeout and smaller than max_interval to take effect. + * + * If absent or zero, disable keepalive probing for a server connection. For a client connection, if :ref:`max_interval ` is also zero, do not keepalive, otherwise use max_interval or QUICHE default to probe all the time. + */ + 'initial_interval'?: (_google_protobuf_Duration | null); +} + +/** + * Config for keepalive probes in a QUIC connection. + * Note that QUIC keep-alive probing packets work differently from HTTP/2 keep-alive PINGs in a sense that the probing packet + * itself doesn't timeout waiting for a probing response. Quic has a shorter idle timeout than TCP, so it doesn't rely on such probing to discover dead connections. If the peer fails to respond, the connection will idle timeout eventually. Thus, they are configured differently from :ref:`connection_keepalive `. + */ +export interface QuicKeepAliveSettings__Output { + /** + * The max interval for a connection to send keep-alive probing packets (with PING or PATH_RESPONSE). The value should be smaller than :ref:`connection idle_timeout ` to prevent idle timeout while not less than 1s to avoid throttling the connection or flooding the peer with probes. + * + * If :ref:`initial_interval ` is absent or zero, a client connection will use this value to start probing. + * + * If zero, disable keepalive probing. + * If absent, use the QUICHE default interval to probe. 
+ */ + 'max_interval': (_google_protobuf_Duration__Output | null); + /** + * The interval to send the first few keep-alive probing packets to prevent connection from hitting the idle timeout. Subsequent probes will be sent, each one with an interval exponentially longer than previous one, till it reaches :ref:`max_interval `. And the probes afterwards will always use :ref:`max_interval `. + * + * The value should be smaller than :ref:`connection idle_timeout ` to prevent idle timeout and smaller than max_interval to take effect. + * + * If absent or zero, disable keepalive probing for a server connection. For a client connection, if :ref:`max_interval ` is also zero, do not keepalive, otherwise use max_interval or QUICHE default to probe all the time. + */ + 'initial_interval': (_google_protobuf_Duration__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/QuicProtocolOptions.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/QuicProtocolOptions.ts new file mode 100644 index 000000000..b33feab51 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/QuicProtocolOptions.ts @@ -0,0 +1,96 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/protocol.proto + +import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; +import type { QuicKeepAliveSettings as _envoy_config_core_v3_QuicKeepAliveSettings, QuicKeepAliveSettings__Output as _envoy_config_core_v3_QuicKeepAliveSettings__Output } from '../../../../envoy/config/core/v3/QuicKeepAliveSettings'; + +/** + * QUIC protocol options which apply to both downstream and upstream connections. + * [#next-free-field: 6] + */ +export interface QuicProtocolOptions { + /** + * Maximum number of streams that the client can negotiate per connection. 100 + * if not specified. + */ + 'max_concurrent_streams'?: (_google_protobuf_UInt32Value | null); + /** + * `Initial stream-level flow-control receive window + * `_ size. Valid values range from + * 1 to 16777216 (2^24, maximum supported by QUICHE) and defaults to 65536 (2^16). + * + * NOTE: 16384 (2^14) is the minimum window size supported in Google QUIC. If configured smaller than it, we will use 16384 instead. + * QUICHE IETF Quic implementation supports 1 bytes window. We only support increasing the default window size now, so it's also the minimum. + * + * This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the + * QUIC stream send and receive buffers. Once the buffer reaches this pointer, watermark callbacks will fire to + * stop the flow of data to the stream buffers. + */ + 'initial_stream_window_size'?: (_google_protobuf_UInt32Value | null); + /** + * Similar to ``initial_stream_window_size``, but for connection-level + * flow-control. Valid values rage from 1 to 25165824 (24MB, maximum supported by QUICHE) and defaults to 65536 (2^16). + * window. Currently, this has the same minimum/default as ``initial_stream_window_size``. + * + * NOTE: 16384 (2^14) is the minimum window size supported in Google QUIC. We only support increasing the default + * window size now, so it's also the minimum. + */ + 'initial_connection_window_size'?: (_google_protobuf_UInt32Value | null); + /** + * The number of timeouts that can occur before port migration is triggered for QUIC clients. + * This defaults to 1. If set to 0, port migration will not occur on path degrading. 
+ * Timeout here refers to QUIC internal path degrading timeout mechanism, such as PTO. + * This has no effect on server sessions. + */ + 'num_timeouts_to_trigger_port_migration'?: (_google_protobuf_UInt32Value | null); + /** + * Probes the peer at the configured interval to solicit traffic, i.e. ACK or PATH_RESPONSE, from the peer to push back connection idle timeout. + * If absent, use the default keepalive behavior of which a client connection sends PINGs every 15s, and a server connection doesn't do anything. + */ + 'connection_keepalive'?: (_envoy_config_core_v3_QuicKeepAliveSettings | null); +} + +/** + * QUIC protocol options which apply to both downstream and upstream connections. + * [#next-free-field: 6] + */ +export interface QuicProtocolOptions__Output { + /** + * Maximum number of streams that the client can negotiate per connection. 100 + * if not specified. + */ + 'max_concurrent_streams': (_google_protobuf_UInt32Value__Output | null); + /** + * `Initial stream-level flow-control receive window + * `_ size. Valid values range from + * 1 to 16777216 (2^24, maximum supported by QUICHE) and defaults to 65536 (2^16). + * + * NOTE: 16384 (2^14) is the minimum window size supported in Google QUIC. If configured smaller than it, we will use 16384 instead. + * QUICHE IETF Quic implementation supports 1 bytes window. We only support increasing the default window size now, so it's also the minimum. + * + * This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the + * QUIC stream send and receive buffers. Once the buffer reaches this pointer, watermark callbacks will fire to + * stop the flow of data to the stream buffers. + */ + 'initial_stream_window_size': (_google_protobuf_UInt32Value__Output | null); + /** + * Similar to ``initial_stream_window_size``, but for connection-level + * flow-control. Valid values rage from 1 to 25165824 (24MB, maximum supported by QUICHE) and defaults to 65536 (2^16). + * window. Currently, this has the same minimum/default as ``initial_stream_window_size``. + * + * NOTE: 16384 (2^14) is the minimum window size supported in Google QUIC. We only support increasing the default + * window size now, so it's also the minimum. + */ + 'initial_connection_window_size': (_google_protobuf_UInt32Value__Output | null); + /** + * The number of timeouts that can occur before port migration is triggered for QUIC clients. + * This defaults to 1. If set to 0, port migration will not occur on path degrading. + * Timeout here refers to QUIC internal path degrading timeout mechanism, such as PTO. + * This has no effect on server sessions. + */ + 'num_timeouts_to_trigger_port_migration': (_google_protobuf_UInt32Value__Output | null); + /** + * Probes the peer at the configured interval to solicit traffic, i.e. ACK or PATH_RESPONSE, from the peer to push back connection idle timeout. + * If absent, use the default keepalive behavior of which a client connection sends PINGs every 15s, and a server connection doesn't do anything. 
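// Hypothetical usage sketch: QuicProtocolOptions with the documented defaults written
// out explicitly plus keep-alive probing (5s initial, 30s max). The import path is an
// assumption; wrapper and Duration messages are plain objects ({ value }, { seconds }).
import type { QuicProtocolOptions } from '../generated/envoy/config/core/v3/QuicProtocolOptions';

const quicOptions: QuicProtocolOptions = {
  max_concurrent_streams: { value: 100 },            // documented default
  initial_stream_window_size: { value: 65536 },      // 2^16, documented default
  initial_connection_window_size: { value: 65536 },
  num_timeouts_to_trigger_port_migration: { value: 1 },
  connection_keepalive: {
    initial_interval: { seconds: 5 },
    max_interval: { seconds: 30 },
  },
};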
+ */ + 'connection_keepalive': (_envoy_config_core_v3_QuicKeepAliveSettings__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/RateLimitSettings.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/RateLimitSettings.ts similarity index 79% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/core/RateLimitSettings.ts rename to packages/grpc-js-xds/src/generated/envoy/config/core/v3/RateLimitSettings.ts index 222c86eb4..bf002d99c 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/RateLimitSettings.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/RateLimitSettings.ts @@ -1,4 +1,4 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/config_source.proto +// Original file: deps/envoy-api/envoy/config/core/v3/config_source.proto import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; import type { DoubleValue as _google_protobuf_DoubleValue, DoubleValue__Output as _google_protobuf_DoubleValue__Output } from '../../../../google/protobuf/DoubleValue'; @@ -11,12 +11,12 @@ export interface RateLimitSettings { * Maximum number of tokens to be used for rate limiting discovery request calls. If not set, a * default value of 100 will be used. */ - 'max_tokens'?: (_google_protobuf_UInt32Value); + 'max_tokens'?: (_google_protobuf_UInt32Value | null); /** * Rate at which tokens will be filled per second. If not set, a default fill rate of 10 tokens * per second will be used. */ - 'fill_rate'?: (_google_protobuf_DoubleValue); + 'fill_rate'?: (_google_protobuf_DoubleValue | null); } /** @@ -27,10 +27,10 @@ export interface RateLimitSettings__Output { * Maximum number of tokens to be used for rate limiting discovery request calls. If not set, a * default value of 100 will be used. */ - 'max_tokens'?: (_google_protobuf_UInt32Value__Output); + 'max_tokens': (_google_protobuf_UInt32Value__Output | null); /** * Rate at which tokens will be filled per second. If not set, a default fill rate of 10 tokens * per second will be used. */ - 'fill_rate'?: (_google_protobuf_DoubleValue__Output); + 'fill_rate': (_google_protobuf_DoubleValue__Output | null); } diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/RemoteDataSource.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/RemoteDataSource.ts new file mode 100644 index 000000000..917304d97 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/RemoteDataSource.ts @@ -0,0 +1,40 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/base.proto + +import type { HttpUri as _envoy_config_core_v3_HttpUri, HttpUri__Output as _envoy_config_core_v3_HttpUri__Output } from '../../../../envoy/config/core/v3/HttpUri'; +import type { RetryPolicy as _envoy_config_core_v3_RetryPolicy, RetryPolicy__Output as _envoy_config_core_v3_RetryPolicy__Output } from '../../../../envoy/config/core/v3/RetryPolicy'; + +/** + * The message specifies how to fetch data from remote and how to verify it. + */ +export interface RemoteDataSource { + /** + * The HTTP URI to fetch the remote data. + */ + 'http_uri'?: (_envoy_config_core_v3_HttpUri | null); + /** + * SHA256 string for verifying data. + */ + 'sha256'?: (string); + /** + * Retry policy for fetching remote data. + */ + 'retry_policy'?: (_envoy_config_core_v3_RetryPolicy | null); +} + +/** + * The message specifies how to fetch data from remote and how to verify it. 
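// Hypothetical usage sketch: a RemoteDataSource that fetches a payload over HTTP,
// pins its SHA256, and retries up to three times. The import path, the HttpUri field
// names (uri/cluster/timeout), the URL, and the placeholder digest are assumptions.
import type { RemoteDataSource } from '../generated/envoy/config/core/v3/RemoteDataSource';

const remoteData: RemoteDataSource = {
  http_uri: {
    uri: 'https://example.com/data.bin',
    cluster: 'example_fetch_cluster',   // upstream cluster used for the fetch
    timeout: { seconds: 5 },
  },
  sha256: '0000000000000000000000000000000000000000000000000000000000000000', // placeholder digest
  retry_policy: { num_retries: { value: 3 } },
};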
+ */ +export interface RemoteDataSource__Output { + /** + * The HTTP URI to fetch the remote data. + */ + 'http_uri': (_envoy_config_core_v3_HttpUri__Output | null); + /** + * SHA256 string for verifying data. + */ + 'sha256': (string); + /** + * Retry policy for fetching remote data. + */ + 'retry_policy': (_envoy_config_core_v3_RetryPolicy__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/RequestMethod.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/RequestMethod.ts new file mode 100644 index 000000000..67d40fda6 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/RequestMethod.ts @@ -0,0 +1,47 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/base.proto + +/** + * HTTP request method. + */ +export const RequestMethod = { + METHOD_UNSPECIFIED: 'METHOD_UNSPECIFIED', + GET: 'GET', + HEAD: 'HEAD', + POST: 'POST', + PUT: 'PUT', + DELETE: 'DELETE', + CONNECT: 'CONNECT', + OPTIONS: 'OPTIONS', + TRACE: 'TRACE', + PATCH: 'PATCH', +} as const; + +/** + * HTTP request method. + */ +export type RequestMethod = + | 'METHOD_UNSPECIFIED' + | 0 + | 'GET' + | 1 + | 'HEAD' + | 2 + | 'POST' + | 3 + | 'PUT' + | 4 + | 'DELETE' + | 5 + | 'CONNECT' + | 6 + | 'OPTIONS' + | 7 + | 'TRACE' + | 8 + | 'PATCH' + | 9 + +/** + * HTTP request method. + */ +export type RequestMethod__Output = typeof RequestMethod[keyof typeof RequestMethod] diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/RetryPolicy.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/RetryPolicy.ts similarity index 63% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/core/RetryPolicy.ts rename to packages/grpc-js-xds/src/generated/envoy/config/core/v3/RetryPolicy.ts index 27c1096b7..6e2af23e6 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/RetryPolicy.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/RetryPolicy.ts @@ -1,6 +1,6 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/base.proto +// Original file: deps/envoy-api/envoy/config/core/v3/base.proto -import type { BackoffStrategy as _envoy_api_v2_core_BackoffStrategy, BackoffStrategy__Output as _envoy_api_v2_core_BackoffStrategy__Output } from '../../../../envoy/api/v2/core/BackoffStrategy'; +import type { BackoffStrategy as _envoy_config_core_v3_BackoffStrategy, BackoffStrategy__Output as _envoy_config_core_v3_BackoffStrategy__Output } from '../../../../envoy/config/core/v3/BackoffStrategy'; import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; /** @@ -8,16 +8,16 @@ import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output a */ export interface RetryPolicy { /** - * Specifies parameters that control :ref:`retry backoff strategy `. + * Specifies parameters that control :ref:`retry backoff strategy `. * This parameter is optional, in which case the default base interval is 1000 milliseconds. The * default maximum interval is 10 times the base interval. */ - 'retry_back_off'?: (_envoy_api_v2_core_BackoffStrategy); + 'retry_back_off'?: (_envoy_config_core_v3_BackoffStrategy | null); /** * Specifies the allowed number of retries. This parameter is optional and * defaults to 1. 
*/ - 'num_retries'?: (_google_protobuf_UInt32Value); + 'num_retries'?: (_google_protobuf_UInt32Value | null); } /** @@ -25,14 +25,14 @@ export interface RetryPolicy { */ export interface RetryPolicy__Output { /** - * Specifies parameters that control :ref:`retry backoff strategy `. + * Specifies parameters that control :ref:`retry backoff strategy `. * This parameter is optional, in which case the default base interval is 1000 milliseconds. The * default maximum interval is 10 times the base interval. */ - 'retry_back_off'?: (_envoy_api_v2_core_BackoffStrategy__Output); + 'retry_back_off': (_envoy_config_core_v3_BackoffStrategy__Output | null); /** * Specifies the allowed number of retries. This parameter is optional and * defaults to 1. */ - 'num_retries'?: (_google_protobuf_UInt32Value__Output); + 'num_retries': (_google_protobuf_UInt32Value__Output | null); } diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/RoutingPriority.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/RoutingPriority.ts new file mode 100644 index 000000000..41172d92f --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/RoutingPriority.ts @@ -0,0 +1,41 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/base.proto + +/** + * Envoy supports :ref:`upstream priority routing + * ` both at the route and the virtual + * cluster level. The current priority implementation uses different connection + * pool and circuit breaking settings for each priority level. This means that + * even for HTTP/2 requests, two physical connections will be used to an + * upstream host. In the future Envoy will likely support true HTTP/2 priority + * over a single upstream connection. + */ +export const RoutingPriority = { + DEFAULT: 'DEFAULT', + HIGH: 'HIGH', +} as const; + +/** + * Envoy supports :ref:`upstream priority routing + * ` both at the route and the virtual + * cluster level. The current priority implementation uses different connection + * pool and circuit breaking settings for each priority level. This means that + * even for HTTP/2 requests, two physical connections will be used to an + * upstream host. In the future Envoy will likely support true HTTP/2 priority + * over a single upstream connection. + */ +export type RoutingPriority = + | 'DEFAULT' + | 0 + | 'HIGH' + | 1 + +/** + * Envoy supports :ref:`upstream priority routing + * ` both at the route and the virtual + * cluster level. The current priority implementation uses different connection + * pool and circuit breaking settings for each priority level. This means that + * even for HTTP/2 requests, two physical connections will be used to an + * upstream host. In the future Envoy will likely support true HTTP/2 priority + * over a single upstream connection. 
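// Hypothetical usage sketch for the RetryPolicy message above, spelling out its
// documented defaults (1s base interval, 10x maximum, one retry). The import path and
// the BackoffStrategy field names (base_interval/max_interval) are assumptions here.
import type { RetryPolicy } from '../generated/envoy/config/core/v3/RetryPolicy';

const retryPolicy: RetryPolicy = {
  retry_back_off: {
    base_interval: { seconds: 1 },
    max_interval: { seconds: 10 },
  },
  num_retries: { value: 1 },
};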
+ */ +export type RoutingPriority__Output = typeof RoutingPriority[keyof typeof RoutingPriority] diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/RuntimeDouble.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/RuntimeDouble.ts similarity index 86% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/core/RuntimeDouble.ts rename to packages/grpc-js-xds/src/generated/envoy/config/core/v3/RuntimeDouble.ts index 8d9aba3e0..a0f849ab5 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/RuntimeDouble.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/RuntimeDouble.ts @@ -1,4 +1,4 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/base.proto +// Original file: deps/envoy-api/envoy/config/core/v3/base.proto /** @@ -22,7 +22,7 @@ export interface RuntimeDouble__Output { /** * Default value if runtime value is not available. */ - 'default_value': (number | string); + 'default_value': (number); /** * Runtime key to get value for comparison. This value is used if defined. */ diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/RuntimeFeatureFlag.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/RuntimeFeatureFlag.ts similarity index 84% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/core/RuntimeFeatureFlag.ts rename to packages/grpc-js-xds/src/generated/envoy/config/core/v3/RuntimeFeatureFlag.ts index 47cf24097..b6df8d617 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/RuntimeFeatureFlag.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/RuntimeFeatureFlag.ts @@ -1,4 +1,4 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/base.proto +// Original file: deps/envoy-api/envoy/config/core/v3/base.proto import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../../google/protobuf/BoolValue'; @@ -9,7 +9,7 @@ export interface RuntimeFeatureFlag { /** * Default value if runtime value is not available. */ - 'default_value'?: (_google_protobuf_BoolValue); + 'default_value'?: (_google_protobuf_BoolValue | null); /** * Runtime key to get value for comparison. This value is used if defined. The boolean value must * be represented via its @@ -25,7 +25,7 @@ export interface RuntimeFeatureFlag__Output { /** * Default value if runtime value is not available. */ - 'default_value'?: (_google_protobuf_BoolValue__Output); + 'default_value': (_google_protobuf_BoolValue__Output | null); /** * Runtime key to get value for comparison. This value is used if defined. 
The boolean value must * be represented via its diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/RuntimeFractionalPercent.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/RuntimeFractionalPercent.ts similarity index 65% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/core/RuntimeFractionalPercent.ts rename to packages/grpc-js-xds/src/generated/envoy/config/core/v3/RuntimeFractionalPercent.ts index 08e29de1f..d976b4375 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/RuntimeFractionalPercent.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/RuntimeFractionalPercent.ts @@ -1,6 +1,6 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/base.proto +// Original file: deps/envoy-api/envoy/config/core/v3/base.proto -import type { FractionalPercent as _envoy_type_FractionalPercent, FractionalPercent__Output as _envoy_type_FractionalPercent__Output } from '../../../../envoy/type/FractionalPercent'; +import type { FractionalPercent as _envoy_type_v3_FractionalPercent, FractionalPercent__Output as _envoy_type_v3_FractionalPercent__Output } from '../../../../envoy/type/v3/FractionalPercent'; /** * Runtime derived FractionalPercent with defaults for when the numerator or denominator is not @@ -9,16 +9,16 @@ import type { FractionalPercent as _envoy_type_FractionalPercent, FractionalPerc * .. note:: * * Parsing of the runtime key's data is implemented such that it may be represented as a - * :ref:`FractionalPercent ` proto represented as JSON/YAML + * :ref:`FractionalPercent ` proto represented as JSON/YAML * and may also be represented as an integer with the assumption that the value is an integral * percentage out of 100. For instance, a runtime key lookup returning the value "42" would parse - * as a `FractionalPercent` whose numerator is 42 and denominator is HUNDRED. + * as a ``FractionalPercent`` whose numerator is 42 and denominator is HUNDRED. */ export interface RuntimeFractionalPercent { /** * Default value if the runtime value's for the numerator/denominator keys are not available. */ - 'default_value'?: (_envoy_type_FractionalPercent); + 'default_value'?: (_envoy_type_v3_FractionalPercent | null); /** * Runtime key for a YAML representation of a FractionalPercent. */ @@ -32,16 +32,16 @@ export interface RuntimeFractionalPercent { * .. note:: * * Parsing of the runtime key's data is implemented such that it may be represented as a - * :ref:`FractionalPercent ` proto represented as JSON/YAML + * :ref:`FractionalPercent ` proto represented as JSON/YAML * and may also be represented as an integer with the assumption that the value is an integral * percentage out of 100. For instance, a runtime key lookup returning the value "42" would parse - * as a `FractionalPercent` whose numerator is 42 and denominator is HUNDRED. + * as a ``FractionalPercent`` whose numerator is 42 and denominator is HUNDRED. */ export interface RuntimeFractionalPercent__Output { /** * Default value if the runtime value's for the numerator/denominator keys are not available. */ - 'default_value'?: (_envoy_type_FractionalPercent__Output); + 'default_value': (_envoy_type_v3_FractionalPercent__Output | null); /** * Runtime key for a YAML representation of a FractionalPercent. 
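// Hypothetical usage sketch: the "42 out of HUNDRED" example from the comment above as
// a RuntimeFractionalPercent value. The import path and runtime key are assumptions;
// numerator/denominator follow envoy.type.v3.FractionalPercent.
import type { RuntimeFractionalPercent } from '../generated/envoy/config/core/v3/RuntimeFractionalPercent';

const sampledFraction: RuntimeFractionalPercent = {
  default_value: { numerator: 42, denominator: 'HUNDRED' },
  runtime_key: 'example.sampling_fraction',
};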
*/ diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/RuntimePercent.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/RuntimePercent.ts new file mode 100644 index 000000000..1dbe6ea4a --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/RuntimePercent.ts @@ -0,0 +1,31 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/base.proto + +import type { Percent as _envoy_type_v3_Percent, Percent__Output as _envoy_type_v3_Percent__Output } from '../../../../envoy/type/v3/Percent'; + +/** + * Runtime derived percentage with a default when not specified. + */ +export interface RuntimePercent { + /** + * Default value if runtime value is not available. + */ + 'default_value'?: (_envoy_type_v3_Percent | null); + /** + * Runtime key to get value for comparison. This value is used if defined. + */ + 'runtime_key'?: (string); +} + +/** + * Runtime derived percentage with a default when not specified. + */ +export interface RuntimePercent__Output { + /** + * Default value if runtime value is not available. + */ + 'default_value': (_envoy_type_v3_Percent__Output | null); + /** + * Runtime key to get value for comparison. This value is used if defined. + */ + 'runtime_key': (string); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/RuntimeUInt32.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/RuntimeUInt32.ts similarity index 90% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/core/RuntimeUInt32.ts rename to packages/grpc-js-xds/src/generated/envoy/config/core/v3/RuntimeUInt32.ts index 72e8972a4..6cc9eead6 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/RuntimeUInt32.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/RuntimeUInt32.ts @@ -1,4 +1,4 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/base.proto +// Original file: deps/envoy-api/envoy/config/core/v3/base.proto /** diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/SchemeHeaderTransformation.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/SchemeHeaderTransformation.ts new file mode 100644 index 000000000..95bb4e400 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/SchemeHeaderTransformation.ts @@ -0,0 +1,24 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/protocol.proto + + +/** + * A message to control transformations to the :scheme header + */ +export interface SchemeHeaderTransformation { + /** + * Overwrite any Scheme header with the contents of this string. + */ + 'scheme_to_overwrite'?: (string); + 'transformation'?: "scheme_to_overwrite"; +} + +/** + * A message to control transformations to the :scheme header + */ +export interface SchemeHeaderTransformation__Output { + /** + * Overwrite any Scheme header with the contents of this string. 
+ */ + 'scheme_to_overwrite'?: (string); + 'transformation': "scheme_to_overwrite"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/SelfConfigSource.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/SelfConfigSource.ts new file mode 100644 index 000000000..939c4fd3d --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/SelfConfigSource.ts @@ -0,0 +1,31 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/config_source.proto + +import type { ApiVersion as _envoy_config_core_v3_ApiVersion, ApiVersion__Output as _envoy_config_core_v3_ApiVersion__Output } from '../../../../envoy/config/core/v3/ApiVersion'; + +/** + * [#not-implemented-hide:] + * Self-referencing config source options. This is currently empty, but when + * set in :ref:`ConfigSource ` can be used to + * specify that other data can be obtained from the same server. + */ +export interface SelfConfigSource { + /** + * API version for xDS transport protocol. This describes the xDS gRPC/REST + * endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. + */ + 'transport_api_version'?: (_envoy_config_core_v3_ApiVersion); +} + +/** + * [#not-implemented-hide:] + * Self-referencing config source options. This is currently empty, but when + * set in :ref:`ConfigSource ` can be used to + * specify that other data can be obtained from the same server. + */ +export interface SelfConfigSource__Output { + /** + * API version for xDS transport protocol. This describes the xDS gRPC/REST + * endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. + */ + 'transport_api_version': (_envoy_config_core_v3_ApiVersion__Output); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/SocketAddress.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/SocketAddress.ts similarity index 57% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/core/SocketAddress.ts rename to packages/grpc-js-xds/src/generated/envoy/config/core/v3/SocketAddress.ts index f81c981c1..f939393fb 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/SocketAddress.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/SocketAddress.ts @@ -1,36 +1,44 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/address.proto +// Original file: deps/envoy-api/envoy/config/core/v3/address.proto -// Original file: deps/envoy-api/envoy/api/v2/core/address.proto +// Original file: deps/envoy-api/envoy/config/core/v3/address.proto -export enum _envoy_api_v2_core_SocketAddress_Protocol { - TCP = 0, - UDP = 1, -} +export const _envoy_config_core_v3_SocketAddress_Protocol = { + TCP: 'TCP', + UDP: 'UDP', +} as const; + +export type _envoy_config_core_v3_SocketAddress_Protocol = + | 'TCP' + | 0 + | 'UDP' + | 1 + +export type _envoy_config_core_v3_SocketAddress_Protocol__Output = typeof _envoy_config_core_v3_SocketAddress_Protocol[keyof typeof _envoy_config_core_v3_SocketAddress_Protocol] /** * [#next-free-field: 7] */ export interface SocketAddress { - 'protocol'?: (_envoy_api_v2_core_SocketAddress_Protocol | keyof typeof _envoy_api_v2_core_SocketAddress_Protocol); + 'protocol'?: (_envoy_config_core_v3_SocketAddress_Protocol); /** * The address for this socket. :ref:`Listeners ` will bind * to the address. An empty address is not allowed. Specify ``0.0.0.0`` or ``::`` * to bind to any address. 
[#comment:TODO(zuercher) reinstate when implemented: * It is possible to distinguish a Listener address via the prefix/suffix matching - * in :ref:`FilterChainMatch `.] When used - * within an upstream :ref:`BindConfig `, the address + * in :ref:`FilterChainMatch `.] When used + * within an upstream :ref:`BindConfig `, the address * controls the source address of outbound connections. For :ref:`clusters - * `, the cluster type determines whether the - * address must be an IP (*STATIC* or *EDS* clusters) or a hostname resolved by DNS - * (*STRICT_DNS* or *LOGICAL_DNS* clusters). Address resolution can be customized - * via :ref:`resolver_name `. + * `, the cluster type determines whether the + * address must be an IP (``STATIC`` or ``EDS`` clusters) or a hostname resolved by DNS + * (``STRICT_DNS`` or ``LOGICAL_DNS`` clusters). Address resolution can be customized + * via :ref:`resolver_name `. */ 'address'?: (string); 'port_value'?: (number); /** * This is only valid if :ref:`resolver_name - * ` is specified below and the + * ` is specified below and the * named resolver is capable of named port resolution. */ 'named_port'?: (string); @@ -39,7 +47,7 @@ export interface SocketAddress { * this is empty, a context dependent default applies. If the address is a concrete * IP address, no resolution will occur. If address is a hostname this * should be set for resolution other than DNS. Specifying a custom resolver with - * *STRICT_DNS* or *LOGICAL_DNS* will generate an error at runtime. + * ``STRICT_DNS`` or ``LOGICAL_DNS`` will generate an error at runtime. */ 'resolver_name'?: (string); /** @@ -56,25 +64,25 @@ export interface SocketAddress { * [#next-free-field: 7] */ export interface SocketAddress__Output { - 'protocol': (keyof typeof _envoy_api_v2_core_SocketAddress_Protocol); + 'protocol': (_envoy_config_core_v3_SocketAddress_Protocol__Output); /** * The address for this socket. :ref:`Listeners ` will bind * to the address. An empty address is not allowed. Specify ``0.0.0.0`` or ``::`` * to bind to any address. [#comment:TODO(zuercher) reinstate when implemented: * It is possible to distinguish a Listener address via the prefix/suffix matching - * in :ref:`FilterChainMatch `.] When used - * within an upstream :ref:`BindConfig `, the address + * in :ref:`FilterChainMatch `.] When used + * within an upstream :ref:`BindConfig `, the address * controls the source address of outbound connections. For :ref:`clusters - * `, the cluster type determines whether the - * address must be an IP (*STATIC* or *EDS* clusters) or a hostname resolved by DNS - * (*STRICT_DNS* or *LOGICAL_DNS* clusters). Address resolution can be customized - * via :ref:`resolver_name `. + * `, the cluster type determines whether the + * address must be an IP (``STATIC`` or ``EDS`` clusters) or a hostname resolved by DNS + * (``STRICT_DNS`` or ``LOGICAL_DNS`` clusters). Address resolution can be customized + * via :ref:`resolver_name `. */ 'address': (string); 'port_value'?: (number); /** * This is only valid if :ref:`resolver_name - * ` is specified below and the + * ` is specified below and the * named resolver is capable of named port resolution. */ 'named_port'?: (string); @@ -83,7 +91,7 @@ export interface SocketAddress__Output { * this is empty, a context dependent default applies. If the address is a concrete * IP address, no resolution will occur. If address is a hostname this * should be set for resolution other than DNS. 
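// Hypothetical usage sketch: the SocketAddress (0.0.0.0, 80) mentioned earlier in this
// diff as a listening-address hint. The import path is an assumption.
import type { SocketAddress } from '../generated/envoy/config/core/v3/SocketAddress';

const listenerAddress: SocketAddress = {
  protocol: 'TCP',
  address: '0.0.0.0',   // bind to any IPv4 address
  port_value: 80,
};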
Specifying a custom resolver with - * *STRICT_DNS* or *LOGICAL_DNS* will generate an error at runtime. + * ``STRICT_DNS`` or ``LOGICAL_DNS`` will generate an error at runtime. */ 'resolver_name': (string); /** diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/SocketOption.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/SocketOption.ts new file mode 100644 index 000000000..9b3bc0019 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/SocketOption.ts @@ -0,0 +1,149 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/socket_option.proto + +import type { Long } from '@grpc/proto-loader'; + +// Original file: deps/envoy-api/envoy/config/core/v3/socket_option.proto + +export const _envoy_config_core_v3_SocketOption_SocketState = { + /** + * Socket options are applied after socket creation but before binding the socket to a port + */ + STATE_PREBIND: 'STATE_PREBIND', + /** + * Socket options are applied after binding the socket to a port but before calling listen() + */ + STATE_BOUND: 'STATE_BOUND', + /** + * Socket options are applied after calling listen() + */ + STATE_LISTENING: 'STATE_LISTENING', +} as const; + +export type _envoy_config_core_v3_SocketOption_SocketState = + /** + * Socket options are applied after socket creation but before binding the socket to a port + */ + | 'STATE_PREBIND' + | 0 + /** + * Socket options are applied after binding the socket to a port but before calling listen() + */ + | 'STATE_BOUND' + | 1 + /** + * Socket options are applied after calling listen() + */ + | 'STATE_LISTENING' + | 2 + +export type _envoy_config_core_v3_SocketOption_SocketState__Output = typeof _envoy_config_core_v3_SocketOption_SocketState[keyof typeof _envoy_config_core_v3_SocketOption_SocketState] + +/** + * Generic socket option message. This would be used to set socket options that + * might not exist in upstream kernels or precompiled Envoy binaries. + * + * For example: + * + * .. code-block:: json + * + * { + * "description": "support tcp keep alive", + * "state": 0, + * "level": 1, + * "name": 9, + * "int_value": 1, + * } + * + * 1 means SOL_SOCKET and 9 means SO_KEEPALIVE on Linux. + * With the above configuration, `TCP Keep-Alives `_ + * can be enabled in socket with Linux, which can be used in + * :ref:`listener's` or + * :ref:`admin's ` socket_options etc. + * + * It should be noted that the name or level may have different values on different platforms. + * [#next-free-field: 7] + */ +export interface SocketOption { + /** + * An optional name to give this socket option for debugging, etc. + * Uniqueness is not required and no special meaning is assumed. + */ + 'description'?: (string); + /** + * Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP + */ + 'level'?: (number | string | Long); + /** + * The numeric name as passed to setsockopt + */ + 'name'?: (number | string | Long); + /** + * Because many sockopts take an int value. + */ + 'int_value'?: (number | string | Long); + /** + * Otherwise it's a byte buffer. + */ + 'buf_value'?: (Buffer | Uint8Array | string); + /** + * The state in which the option will be applied. When used in BindConfig + * STATE_PREBIND is currently the only valid value. + */ + 'state'?: (_envoy_config_core_v3_SocketOption_SocketState); + 'value'?: "int_value"|"buf_value"; +} + +/** + * Generic socket option message. This would be used to set socket options that + * might not exist in upstream kernels or precompiled Envoy binaries. + * + * For example: + * + * .. 
code-block:: json + * + * { + * "description": "support tcp keep alive", + * "state": 0, + * "level": 1, + * "name": 9, + * "int_value": 1, + * } + * + * 1 means SOL_SOCKET and 9 means SO_KEEPALIVE on Linux. + * With the above configuration, `TCP Keep-Alives `_ + * can be enabled in socket with Linux, which can be used in + * :ref:`listener's` or + * :ref:`admin's ` socket_options etc. + * + * It should be noted that the name or level may have different values on different platforms. + * [#next-free-field: 7] + */ +export interface SocketOption__Output { + /** + * An optional name to give this socket option for debugging, etc. + * Uniqueness is not required and no special meaning is assumed. + */ + 'description': (string); + /** + * Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP + */ + 'level': (string); + /** + * The numeric name as passed to setsockopt + */ + 'name': (string); + /** + * Because many sockopts take an int value. + */ + 'int_value'?: (string); + /** + * Otherwise it's a byte buffer. + */ + 'buf_value'?: (Buffer); + /** + * The state in which the option will be applied. When used in BindConfig + * STATE_PREBIND is currently the only valid value. + */ + 'state': (_envoy_config_core_v3_SocketOption_SocketState__Output); + 'value': "int_value"|"buf_value"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/SocketOptionsOverride.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/SocketOptionsOverride.ts new file mode 100644 index 000000000..5df984579 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/SocketOptionsOverride.ts @@ -0,0 +1,11 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/socket_option.proto + +import type { SocketOption as _envoy_config_core_v3_SocketOption, SocketOption__Output as _envoy_config_core_v3_SocketOption__Output } from '../../../../envoy/config/core/v3/SocketOption'; + +export interface SocketOptionsOverride { + 'socket_options'?: (_envoy_config_core_v3_SocketOption)[]; +} + +export interface SocketOptionsOverride__Output { + 'socket_options': (_envoy_config_core_v3_SocketOption__Output)[]; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/SubstitutionFormatString.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/SubstitutionFormatString.ts new file mode 100644 index 000000000..01a97441c --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/SubstitutionFormatString.ts @@ -0,0 +1,201 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/substitution_format_string.proto + +import type { Struct as _google_protobuf_Struct, Struct__Output as _google_protobuf_Struct__Output } from '../../../../google/protobuf/Struct'; +import type { DataSource as _envoy_config_core_v3_DataSource, DataSource__Output as _envoy_config_core_v3_DataSource__Output } from '../../../../envoy/config/core/v3/DataSource'; +import type { TypedExtensionConfig as _envoy_config_core_v3_TypedExtensionConfig, TypedExtensionConfig__Output as _envoy_config_core_v3_TypedExtensionConfig__Output } from '../../../../envoy/config/core/v3/TypedExtensionConfig'; + +/** + * Configuration to use multiple :ref:`command operators ` + * to generate a new string in either plain text or JSON format. + * [#next-free-field: 7] + */ +export interface SubstitutionFormatString { + /** + * Specify a format with command operators to form a text string. + * Its details is described in :ref:`format string`. 
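// Hypothetical usage sketch: the SO_KEEPALIVE example from the JSON comment above,
// expressed against the generated SocketOption type. The import path is an assumption;
// level 1 is SOL_SOCKET and name 9 is SO_KEEPALIVE on Linux, as the comment notes.
import type { SocketOption } from '../generated/envoy/config/core/v3/SocketOption';

const keepAliveOption: SocketOption = {
  description: 'support tcp keep alive',
  state: 'STATE_PREBIND',   // applied before the socket is bound
  level: 1,                 // SOL_SOCKET
  name: 9,                  // SO_KEEPALIVE
  int_value: 1,             // enable keep-alives
};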
+ * + * For example, setting ``text_format`` like below, + * + * .. validated-code-block:: yaml + * :type-name: envoy.config.core.v3.SubstitutionFormatString + * + * text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" + * + * generates plain text similar to: + * + * .. code-block:: text + * + * upstream connect error:503:path=/foo + * + * Deprecated in favor of :ref:`text_format_source `. To migrate text format strings, use the :ref:`inline_string ` field. + * @deprecated + */ + 'text_format'?: (string); + /** + * Specify a format with command operators to form a JSON string. + * Its details is described in :ref:`format dictionary`. + * Values are rendered as strings, numbers, or boolean values as appropriate. + * Nested JSON objects may be produced by some command operators (e.g. FILTER_STATE or DYNAMIC_METADATA). + * See the documentation for a specific command operator for details. + * + * .. validated-code-block:: yaml + * :type-name: envoy.config.core.v3.SubstitutionFormatString + * + * json_format: + * status: "%RESPONSE_CODE%" + * message: "%LOCAL_REPLY_BODY%" + * + * The following JSON object would be created: + * + * .. code-block:: json + * + * { + * "status": 500, + * "message": "My error message" + * } + */ + 'json_format'?: (_google_protobuf_Struct | null); + /** + * If set to true, when command operators are evaluated to null, + * + * * for ``text_format``, the output of the empty operator is changed from ``-`` to an + * empty string, so that empty values are omitted entirely. + * * for ``json_format`` the keys with null values are omitted in the output structure. + */ + 'omit_empty_values'?: (boolean); + /** + * Specify a ``content_type`` field. + * If this field is not set then ``text/plain`` is used for ``text_format`` and + * ``application/json`` is used for ``json_format``. + * + * .. validated-code-block:: yaml + * :type-name: envoy.config.core.v3.SubstitutionFormatString + * + * content_type: "text/html; charset=UTF-8" + */ + 'content_type'?: (string); + /** + * Specify a format with command operators to form a text string. + * Its details is described in :ref:`format string`. + * + * For example, setting ``text_format`` like below, + * + * .. validated-code-block:: yaml + * :type-name: envoy.config.core.v3.SubstitutionFormatString + * + * text_format_source: + * inline_string: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" + * + * generates plain text similar to: + * + * .. code-block:: text + * + * upstream connect error:503:path=/foo + */ + 'text_format_source'?: (_envoy_config_core_v3_DataSource | null); + /** + * Specifies a collection of Formatter plugins that can be called from the access log configuration. + * See the formatters extensions documentation for details. + * [#extension-category: envoy.formatter] + */ + 'formatters'?: (_envoy_config_core_v3_TypedExtensionConfig)[]; + 'format'?: "text_format"|"json_format"|"text_format_source"; +} + +/** + * Configuration to use multiple :ref:`command operators ` + * to generate a new string in either plain text or JSON format. + * [#next-free-field: 7] + */ +export interface SubstitutionFormatString__Output { + /** + * Specify a format with command operators to form a text string. + * Its details is described in :ref:`format string`. + * + * For example, setting ``text_format`` like below, + * + * .. 
validated-code-block:: yaml + * :type-name: envoy.config.core.v3.SubstitutionFormatString + * + * text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" + * + * generates plain text similar to: + * + * .. code-block:: text + * + * upstream connect error:503:path=/foo + * + * Deprecated in favor of :ref:`text_format_source `. To migrate text format strings, use the :ref:`inline_string ` field. + * @deprecated + */ + 'text_format'?: (string); + /** + * Specify a format with command operators to form a JSON string. + * Its details is described in :ref:`format dictionary`. + * Values are rendered as strings, numbers, or boolean values as appropriate. + * Nested JSON objects may be produced by some command operators (e.g. FILTER_STATE or DYNAMIC_METADATA). + * See the documentation for a specific command operator for details. + * + * .. validated-code-block:: yaml + * :type-name: envoy.config.core.v3.SubstitutionFormatString + * + * json_format: + * status: "%RESPONSE_CODE%" + * message: "%LOCAL_REPLY_BODY%" + * + * The following JSON object would be created: + * + * .. code-block:: json + * + * { + * "status": 500, + * "message": "My error message" + * } + */ + 'json_format'?: (_google_protobuf_Struct__Output | null); + /** + * If set to true, when command operators are evaluated to null, + * + * * for ``text_format``, the output of the empty operator is changed from ``-`` to an + * empty string, so that empty values are omitted entirely. + * * for ``json_format`` the keys with null values are omitted in the output structure. + */ + 'omit_empty_values': (boolean); + /** + * Specify a ``content_type`` field. + * If this field is not set then ``text/plain`` is used for ``text_format`` and + * ``application/json`` is used for ``json_format``. + * + * .. validated-code-block:: yaml + * :type-name: envoy.config.core.v3.SubstitutionFormatString + * + * content_type: "text/html; charset=UTF-8" + */ + 'content_type': (string); + /** + * Specify a format with command operators to form a text string. + * Its details is described in :ref:`format string`. + * + * For example, setting ``text_format`` like below, + * + * .. validated-code-block:: yaml + * :type-name: envoy.config.core.v3.SubstitutionFormatString + * + * text_format_source: + * inline_string: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" + * + * generates plain text similar to: + * + * .. code-block:: text + * + * upstream connect error:503:path=/foo + */ + 'text_format_source'?: (_envoy_config_core_v3_DataSource__Output | null); + /** + * Specifies a collection of Formatter plugins that can be called from the access log configuration. + * See the formatters extensions documentation for details. 
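// Hypothetical usage sketch: the text_format_source example from the comments above as
// a TypeScript value. The import path is an assumption; inline_string is the DataSource
// field the deprecation note above points to.
import type { SubstitutionFormatString } from '../generated/envoy/config/core/v3/SubstitutionFormatString';

const localReplyFormat: SubstitutionFormatString = {
  text_format_source: {
    inline_string: '%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n',
  },
  content_type: 'text/plain',   // documented default for text formats
  omit_empty_values: true,      // drop keys/operators that evaluate to null
};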
+ * [#extension-category: envoy.formatter] + */ + 'formatters': (_envoy_config_core_v3_TypedExtensionConfig__Output)[]; + 'format': "text_format"|"json_format"|"text_format_source"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/TcpKeepalive.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/TcpKeepalive.ts similarity index 75% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/core/TcpKeepalive.ts rename to packages/grpc-js-xds/src/generated/envoy/config/core/v3/TcpKeepalive.ts index 394a54feb..1ad81091d 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/TcpKeepalive.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/TcpKeepalive.ts @@ -1,4 +1,4 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/address.proto +// Original file: deps/envoy-api/envoy/config/core/v3/address.proto import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; @@ -8,18 +8,18 @@ export interface TcpKeepalive { * the connection is dead. Default is to use the OS level configuration (unless * overridden, Linux defaults to 9.) */ - 'keepalive_probes'?: (_google_protobuf_UInt32Value); + 'keepalive_probes'?: (_google_protobuf_UInt32Value | null); /** * The number of seconds a connection needs to be idle before keep-alive probes * start being sent. Default is to use the OS level configuration (unless * overridden, Linux defaults to 7200s (i.e., 2 hours.) */ - 'keepalive_time'?: (_google_protobuf_UInt32Value); + 'keepalive_time'?: (_google_protobuf_UInt32Value | null); /** * The number of seconds between keep-alive probes. Default is to use the OS * level configuration (unless overridden, Linux defaults to 75s.) */ - 'keepalive_interval'?: (_google_protobuf_UInt32Value); + 'keepalive_interval'?: (_google_protobuf_UInt32Value | null); } export interface TcpKeepalive__Output { @@ -28,16 +28,16 @@ export interface TcpKeepalive__Output { * the connection is dead. Default is to use the OS level configuration (unless * overridden, Linux defaults to 9.) */ - 'keepalive_probes'?: (_google_protobuf_UInt32Value__Output); + 'keepalive_probes': (_google_protobuf_UInt32Value__Output | null); /** * The number of seconds a connection needs to be idle before keep-alive probes * start being sent. Default is to use the OS level configuration (unless * overridden, Linux defaults to 7200s (i.e., 2 hours.) */ - 'keepalive_time'?: (_google_protobuf_UInt32Value__Output); + 'keepalive_time': (_google_protobuf_UInt32Value__Output | null); /** * The number of seconds between keep-alive probes. Default is to use the OS * level configuration (unless overridden, Linux defaults to 75s.) 
*/ - 'keepalive_interval'?: (_google_protobuf_UInt32Value__Output); + 'keepalive_interval': (_google_protobuf_UInt32Value__Output | null); } diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/TcpProtocolOptions.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/TcpProtocolOptions.ts similarity index 70% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/core/TcpProtocolOptions.ts rename to packages/grpc-js-xds/src/generated/envoy/config/core/v3/TcpProtocolOptions.ts index bb7afc1d1..28239f63e 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/TcpProtocolOptions.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/TcpProtocolOptions.ts @@ -1,4 +1,4 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/protocol.proto +// Original file: deps/envoy-api/envoy/config/core/v3/protocol.proto /** diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/TrafficDirection.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/TrafficDirection.ts new file mode 100644 index 000000000..e450d43cb --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/TrafficDirection.ts @@ -0,0 +1,44 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/base.proto + +/** + * Identifies the direction of the traffic relative to the local Envoy. + */ +export const TrafficDirection = { + /** + * Default option is unspecified. + */ + UNSPECIFIED: 'UNSPECIFIED', + /** + * The transport is used for incoming traffic. + */ + INBOUND: 'INBOUND', + /** + * The transport is used for outgoing traffic. + */ + OUTBOUND: 'OUTBOUND', +} as const; + +/** + * Identifies the direction of the traffic relative to the local Envoy. + */ +export type TrafficDirection = + /** + * Default option is unspecified. + */ + | 'UNSPECIFIED' + | 0 + /** + * The transport is used for incoming traffic. + */ + | 'INBOUND' + | 1 + /** + * The transport is used for outgoing traffic. + */ + | 'OUTBOUND' + | 2 + +/** + * Identifies the direction of the traffic relative to the local Envoy. + */ +export type TrafficDirection__Output = typeof TrafficDirection[keyof typeof TrafficDirection] diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/TransportSocket.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/TransportSocket.ts similarity index 69% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/core/TransportSocket.ts rename to packages/grpc-js-xds/src/generated/envoy/config/core/v3/TransportSocket.ts index b45767eb2..ff05991ad 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/core/TransportSocket.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/TransportSocket.ts @@ -1,11 +1,10 @@ -// Original file: deps/envoy-api/envoy/api/v2/core/base.proto +// Original file: deps/envoy-api/envoy/config/core/v3/base.proto -import type { Struct as _google_protobuf_Struct, Struct__Output as _google_protobuf_Struct__Output } from '../../../../google/protobuf/Struct'; import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; /** * Configuration for transport socket in :ref:`listeners ` and - * :ref:`clusters `. If the configuration is + * :ref:`clusters `. If the configuration is * empty, a default transport socket implementation and configuration will be * chosen based on the platform and existence of tls_context. */ @@ -15,18 +14,17 @@ export interface TransportSocket { * socket implementation. 
*/ 'name'?: (string); - 'config'?: (_google_protobuf_Struct); - 'typed_config'?: (_google_protobuf_Any); + 'typed_config'?: (_google_protobuf_Any | null); /** * Implementation specific configuration which depends on the implementation being instantiated. * See the supported transport socket implementations for further documentation. */ - 'config_type'?: "config"|"typed_config"; + 'config_type'?: "typed_config"; } /** * Configuration for transport socket in :ref:`listeners ` and - * :ref:`clusters `. If the configuration is + * :ref:`clusters `. If the configuration is * empty, a default transport socket implementation and configuration will be * chosen based on the platform and existence of tls_context. */ @@ -36,11 +34,10 @@ export interface TransportSocket__Output { * socket implementation. */ 'name': (string); - 'config'?: (_google_protobuf_Struct__Output); - 'typed_config'?: (_google_protobuf_Any__Output); + 'typed_config'?: (_google_protobuf_Any__Output | null); /** * Implementation specific configuration which depends on the implementation being instantiated. * See the supported transport socket implementations for further documentation. */ - 'config_type': "config"|"typed_config"; + 'config_type': "typed_config"; } diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/TypedExtensionConfig.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/TypedExtensionConfig.ts new file mode 100644 index 000000000..d46751d4f --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/TypedExtensionConfig.ts @@ -0,0 +1,45 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/extension.proto + +import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; + +/** + * Message type for extension configuration. + * [#next-major-version: revisit all existing typed_config that doesn't use this wrapper.]. + */ +export interface TypedExtensionConfig { + /** + * The name of an extension. This is not used to select the extension, instead + * it serves the role of an opaque identifier. + */ + 'name'?: (string); + /** + * The typed config for the extension. The type URL will be used to identify + * the extension. In the case that the type URL is ``xds.type.v3.TypedStruct`` + * (or, for historical reasons, ``udpa.type.v1.TypedStruct``), the inner type + * URL of ``TypedStruct`` will be utilized. See the + * :ref:`extension configuration overview + * ` for further details. + */ + 'typed_config'?: (_google_protobuf_Any | null); +} + +/** + * Message type for extension configuration. + * [#next-major-version: revisit all existing typed_config that doesn't use this wrapper.]. + */ +export interface TypedExtensionConfig__Output { + /** + * The name of an extension. This is not used to select the extension, instead + * it serves the role of an opaque identifier. + */ + 'name': (string); + /** + * The typed config for the extension. The type URL will be used to identify + * the extension. In the case that the type URL is ``xds.type.v3.TypedStruct`` + * (or, for historical reasons, ``udpa.type.v1.TypedStruct``), the inner type + * URL of ``TypedStruct`` will be utilized. See the + * :ref:`extension configuration overview + * ` for further details. 
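// --- Editorial sketch (not part of the generated diff) ---------------------
// TypedExtensionConfig wraps an opaque name plus a google.protobuf.Any. The
// Any payload would normally be produced by the proto loader for the concrete
// extension type; the name and type URL below are hypothetical examples.
import type { TypedExtensionConfig } from './generated/envoy/config/core/v3/TypedExtensionConfig';

const formatterExtension: TypedExtensionConfig = {
  name: 'example.custom_formatter',   // opaque identifier, not used for selection
  typed_config: {
    type_url: 'type.googleapis.com/xds.type.v3.TypedStruct',
    value: Buffer.alloc(0),           // placeholder for the serialized TypedStruct
  },
};
// ---------------------------------------------------------------------------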
+ */ + 'typed_config': (_google_protobuf_Any__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/UdpSocketConfig.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/UdpSocketConfig.ts new file mode 100644 index 000000000..f5e38b2b4 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/UdpSocketConfig.ts @@ -0,0 +1,44 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/udp_socket_config.proto + +import type { UInt64Value as _google_protobuf_UInt64Value, UInt64Value__Output as _google_protobuf_UInt64Value__Output } from '../../../../google/protobuf/UInt64Value'; +import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../../google/protobuf/BoolValue'; + +/** + * Generic UDP socket configuration. + */ +export interface UdpSocketConfig { + /** + * The maximum size of received UDP datagrams. Using a larger size will cause Envoy to allocate + * more memory per socket. Received datagrams above this size will be dropped. If not set + * defaults to 1500 bytes. + */ + 'max_rx_datagram_size'?: (_google_protobuf_UInt64Value | null); + /** + * Configures whether Generic Receive Offload (GRO) + * _ is preferred when reading from the + * UDP socket. The default is context dependent and is documented where UdpSocketConfig is used. + * This option affects performance but not functionality. If GRO is not supported by the operating + * system, non-GRO receive will be used. + */ + 'prefer_gro'?: (_google_protobuf_BoolValue | null); +} + +/** + * Generic UDP socket configuration. + */ +export interface UdpSocketConfig__Output { + /** + * The maximum size of received UDP datagrams. Using a larger size will cause Envoy to allocate + * more memory per socket. Received datagrams above this size will be dropped. If not set + * defaults to 1500 bytes. + */ + 'max_rx_datagram_size': (_google_protobuf_UInt64Value__Output | null); + /** + * Configures whether Generic Receive Offload (GRO) + * _ is preferred when reading from the + * UDP socket. The default is context dependent and is documented where UdpSocketConfig is used. + * This option affects performance but not functionality. If GRO is not supported by the operating + * system, non-GRO receive will be used. + */ + 'prefer_gro': (_google_protobuf_BoolValue__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/UpstreamHttpProtocolOptions.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/UpstreamHttpProtocolOptions.ts new file mode 100644 index 000000000..91e9f0b53 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/UpstreamHttpProtocolOptions.ts @@ -0,0 +1,62 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/protocol.proto + + +export interface UpstreamHttpProtocolOptions { + /** + * Set transport socket `SNI `_ for new + * upstream connections based on the downstream HTTP host/authority header or any other arbitrary + * header when :ref:`override_auto_sni_header ` + * is set, as seen by the :ref:`router filter `. + * Does nothing if a filter before the http router filter sets the corresponding metadata. + */ + 'auto_sni'?: (boolean); + /** + * Automatic validate upstream presented certificate for new upstream connections based on the + * downstream HTTP host/authority header or any other arbitrary header when :ref:`override_auto_sni_header ` + * is set, as seen by the :ref:`router filter `. 
+ * This field is intended to be set with ``auto_sni`` field. + * Does nothing if a filter before the http router filter sets the corresponding metadata. + */ + 'auto_san_validation'?: (boolean); + /** + * An optional alternative to the host/authority header to be used for setting the SNI value. + * It should be a valid downstream HTTP header, as seen by the + * :ref:`router filter `. + * If unset, host/authority header will be used for populating the SNI. If the specified header + * is not found or the value is empty, host/authority header will be used instead. + * This field is intended to be set with ``auto_sni`` and/or ``auto_san_validation`` fields. + * If none of these fields are set then setting this would be a no-op. + * Does nothing if a filter before the http router filter sets the corresponding metadata. + */ + 'override_auto_sni_header'?: (string); +} + +export interface UpstreamHttpProtocolOptions__Output { + /** + * Set transport socket `SNI `_ for new + * upstream connections based on the downstream HTTP host/authority header or any other arbitrary + * header when :ref:`override_auto_sni_header ` + * is set, as seen by the :ref:`router filter `. + * Does nothing if a filter before the http router filter sets the corresponding metadata. + */ + 'auto_sni': (boolean); + /** + * Automatic validate upstream presented certificate for new upstream connections based on the + * downstream HTTP host/authority header or any other arbitrary header when :ref:`override_auto_sni_header ` + * is set, as seen by the :ref:`router filter `. + * This field is intended to be set with ``auto_sni`` field. + * Does nothing if a filter before the http router filter sets the corresponding metadata. + */ + 'auto_san_validation': (boolean); + /** + * An optional alternative to the host/authority header to be used for setting the SNI value. + * It should be a valid downstream HTTP header, as seen by the + * :ref:`router filter `. + * If unset, host/authority header will be used for populating the SNI. If the specified header + * is not found or the value is empty, host/authority header will be used instead. + * This field is intended to be set with ``auto_sni`` and/or ``auto_san_validation`` fields. + * If none of these fields are set then setting this would be a no-op. + * Does nothing if a filter before the http router filter sets the corresponding metadata. + */ + 'override_auto_sni_header': (string); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/core/v3/WatchedDirectory.ts b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/WatchedDirectory.ts new file mode 100644 index 000000000..d6f0d124b --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/core/v3/WatchedDirectory.ts @@ -0,0 +1,24 @@ +// Original file: deps/envoy-api/envoy/config/core/v3/base.proto + + +/** + * A directory that is watched for changes, e.g. by inotify on Linux. Move/rename + * events inside this directory trigger the watch. + */ +export interface WatchedDirectory { + /** + * Directory path to watch. + */ + 'path'?: (string); +} + +/** + * A directory that is watched for changes, e.g. by inotify on Linux. Move/rename + * events inside this directory trigger the watch. + */ +export interface WatchedDirectory__Output { + /** + * Directory path to watch. 
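// --- Editorial sketch (not part of the generated diff) ---------------------
// UpstreamHttpProtocolOptions as described above: derive SNI and certificate
// validation from a header, falling back to :authority when it is absent or
// empty. The header name is hypothetical and the import path is assumed.
import type { UpstreamHttpProtocolOptions } from './generated/envoy/config/core/v3/UpstreamHttpProtocolOptions';

const upstreamHttpOptions: UpstreamHttpProtocolOptions = {
  auto_sni: true,
  auto_san_validation: true,                    // intended to be used together with auto_sni
  override_auto_sni_header: 'x-host-override',
};
// ---------------------------------------------------------------------------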
+ */ + 'path': (string); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/ClusterLoadAssignment.ts b/packages/grpc-js-xds/src/generated/envoy/config/endpoint/v3/ClusterLoadAssignment.ts similarity index 65% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/ClusterLoadAssignment.ts rename to packages/grpc-js-xds/src/generated/envoy/config/endpoint/v3/ClusterLoadAssignment.ts index 14598bec1..91ce2e0c8 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/ClusterLoadAssignment.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/endpoint/v3/ClusterLoadAssignment.ts @@ -1,15 +1,15 @@ -// Original file: deps/envoy-api/envoy/api/v2/endpoint.proto +// Original file: deps/envoy-api/envoy/config/endpoint/v3/endpoint.proto -import type { LocalityLbEndpoints as _envoy_api_v2_endpoint_LocalityLbEndpoints, LocalityLbEndpoints__Output as _envoy_api_v2_endpoint_LocalityLbEndpoints__Output } from '../../../envoy/api/v2/endpoint/LocalityLbEndpoints'; -import type { Endpoint as _envoy_api_v2_endpoint_Endpoint, Endpoint__Output as _envoy_api_v2_endpoint_Endpoint__Output } from '../../../envoy/api/v2/endpoint/Endpoint'; -import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../google/protobuf/UInt32Value'; -import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../google/protobuf/Duration'; -import type { FractionalPercent as _envoy_type_FractionalPercent, FractionalPercent__Output as _envoy_type_FractionalPercent__Output } from '../../../envoy/type/FractionalPercent'; +import type { LocalityLbEndpoints as _envoy_config_endpoint_v3_LocalityLbEndpoints, LocalityLbEndpoints__Output as _envoy_config_endpoint_v3_LocalityLbEndpoints__Output } from '../../../../envoy/config/endpoint/v3/LocalityLbEndpoints'; +import type { Endpoint as _envoy_config_endpoint_v3_Endpoint, Endpoint__Output as _envoy_config_endpoint_v3_Endpoint__Output } from '../../../../envoy/config/endpoint/v3/Endpoint'; +import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; +import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../../google/protobuf/Duration'; +import type { FractionalPercent as _envoy_type_v3_FractionalPercent, FractionalPercent__Output as _envoy_type_v3_FractionalPercent__Output } from '../../../../envoy/type/v3/FractionalPercent'; /** * [#not-implemented-hide:] */ -export interface _envoy_api_v2_ClusterLoadAssignment_Policy_DropOverload { +export interface _envoy_config_endpoint_v3_ClusterLoadAssignment_Policy_DropOverload { /** * Identifier for the policy specifying the drop. */ @@ -17,13 +17,13 @@ export interface _envoy_api_v2_ClusterLoadAssignment_Policy_DropOverload { /** * Percentage of traffic that should be dropped for the category. */ - 'drop_percentage'?: (_envoy_type_FractionalPercent); + 'drop_percentage'?: (_envoy_type_v3_FractionalPercent | null); } /** * [#not-implemented-hide:] */ -export interface _envoy_api_v2_ClusterLoadAssignment_Policy_DropOverload__Output { +export interface _envoy_config_endpoint_v3_ClusterLoadAssignment_Policy_DropOverload__Output { /** * Identifier for the policy specifying the drop. 
*/ @@ -31,14 +31,14 @@ export interface _envoy_api_v2_ClusterLoadAssignment_Policy_DropOverload__Output /** * Percentage of traffic that should be dropped for the category. */ - 'drop_percentage'?: (_envoy_type_FractionalPercent__Output); + 'drop_percentage': (_envoy_type_v3_FractionalPercent__Output | null); } /** * Load balancing policy settings. * [#next-free-field: 6] */ -export interface _envoy_api_v2_ClusterLoadAssignment_Policy { +export interface _envoy_config_endpoint_v3_ClusterLoadAssignment_Policy { /** * Action to trim the overall incoming traffic to protect the upstream * hosts. This action allows protection in case the hosts are unable to @@ -61,11 +61,11 @@ export interface _envoy_api_v2_ClusterLoadAssignment_Policy { * actual_outgoing_load = 20% // remaining after applying all categories. * [#not-implemented-hide:] */ - 'drop_overloads'?: (_envoy_api_v2_ClusterLoadAssignment_Policy_DropOverload)[]; + 'drop_overloads'?: (_envoy_config_endpoint_v3_ClusterLoadAssignment_Policy_DropOverload)[]; /** * Priority levels and localities are considered overprovisioned with this * factor (in percentage). This means that we don't consider a priority - * level or locality unhealthy until the percentage of healthy hosts + * level or locality unhealthy until the fraction of healthy hosts * multiplied by the overprovisioning factor drops below 100. * With the default value 140(1.4), Envoy doesn't consider a priority level * or a locality unhealthy until their percentage of healthy hosts drops @@ -78,32 +78,21 @@ export interface _envoy_api_v2_ClusterLoadAssignment_Policy { * Read more at :ref:`priority levels ` and * :ref:`localities `. */ - 'overprovisioning_factor'?: (_google_protobuf_UInt32Value); + 'overprovisioning_factor'?: (_google_protobuf_UInt32Value | null); /** * The max time until which the endpoints from this assignment can be used. * If no new assignments are received before this time expires the endpoints * are considered stale and should be marked unhealthy. * Defaults to 0 which means endpoints never go stale. */ - 'endpoint_stale_after'?: (_google_protobuf_Duration); - /** - * The flag to disable overprovisioning. If it is set to true, - * :ref:`overprovisioning factor - * ` will be ignored - * and Envoy will not perform graceful failover between priority levels or - * localities as endpoints become unhealthy. Otherwise Envoy will perform - * graceful failover as :ref:`overprovisioning factor - * ` suggests. - * [#not-implemented-hide:] - */ - 'disable_overprovisioning'?: (boolean); + 'endpoint_stale_after'?: (_google_protobuf_Duration | null); } /** * Load balancing policy settings. * [#next-free-field: 6] */ -export interface _envoy_api_v2_ClusterLoadAssignment_Policy__Output { +export interface _envoy_config_endpoint_v3_ClusterLoadAssignment_Policy__Output { /** * Action to trim the overall incoming traffic to protect the upstream * hosts. This action allows protection in case the hosts are unable to @@ -126,11 +115,11 @@ export interface _envoy_api_v2_ClusterLoadAssignment_Policy__Output { * actual_outgoing_load = 20% // remaining after applying all categories. * [#not-implemented-hide:] */ - 'drop_overloads': (_envoy_api_v2_ClusterLoadAssignment_Policy_DropOverload__Output)[]; + 'drop_overloads': (_envoy_config_endpoint_v3_ClusterLoadAssignment_Policy_DropOverload__Output)[]; /** * Priority levels and localities are considered overprovisioned with this * factor (in percentage). 
This means that we don't consider a priority - * level or locality unhealthy until the percentage of healthy hosts + * level or locality unhealthy until the fraction of healthy hosts * multiplied by the overprovisioning factor drops below 100. * With the default value 140(1.4), Envoy doesn't consider a priority level * or a locality unhealthy until their percentage of healthy hosts drops @@ -143,25 +132,14 @@ export interface _envoy_api_v2_ClusterLoadAssignment_Policy__Output { * Read more at :ref:`priority levels ` and * :ref:`localities `. */ - 'overprovisioning_factor'?: (_google_protobuf_UInt32Value__Output); + 'overprovisioning_factor': (_google_protobuf_UInt32Value__Output | null); /** * The max time until which the endpoints from this assignment can be used. * If no new assignments are received before this time expires the endpoints * are considered stale and should be marked unhealthy. * Defaults to 0 which means endpoints never go stale. */ - 'endpoint_stale_after'?: (_google_protobuf_Duration__Output); - /** - * The flag to disable overprovisioning. If it is set to true, - * :ref:`overprovisioning factor - * ` will be ignored - * and Envoy will not perform graceful failover between priority levels or - * localities as endpoints become unhealthy. Otherwise Envoy will perform - * graceful failover as :ref:`overprovisioning factor - * ` suggests. - * [#not-implemented-hide:] - */ - 'disable_overprovisioning': (boolean); + 'endpoint_stale_after': (_google_protobuf_Duration__Output | null); } /** @@ -179,24 +157,24 @@ export interface _envoy_api_v2_ClusterLoadAssignment_Policy__Output { export interface ClusterLoadAssignment { /** * Name of the cluster. This will be the :ref:`service_name - * ` value if specified + * ` value if specified * in the cluster :ref:`EdsClusterConfig - * `. + * `. */ 'cluster_name'?: (string); /** * List of endpoints to load balance to. */ - 'endpoints'?: (_envoy_api_v2_endpoint_LocalityLbEndpoints)[]; + 'endpoints'?: (_envoy_config_endpoint_v3_LocalityLbEndpoints)[]; /** * Load balancing policy settings. */ - 'policy'?: (_envoy_api_v2_ClusterLoadAssignment_Policy); + 'policy'?: (_envoy_config_endpoint_v3_ClusterLoadAssignment_Policy | null); /** * Map of named endpoints that can be referenced in LocalityLbEndpoints. * [#not-implemented-hide:] */ - 'named_endpoints'?: ({[key: string]: _envoy_api_v2_endpoint_Endpoint}); + 'named_endpoints'?: ({[key: string]: _envoy_config_endpoint_v3_Endpoint}); } /** @@ -214,22 +192,22 @@ export interface ClusterLoadAssignment { export interface ClusterLoadAssignment__Output { /** * Name of the cluster. This will be the :ref:`service_name - * ` value if specified + * ` value if specified * in the cluster :ref:`EdsClusterConfig - * `. + * `. */ 'cluster_name': (string); /** * List of endpoints to load balance to. */ - 'endpoints': (_envoy_api_v2_endpoint_LocalityLbEndpoints__Output)[]; + 'endpoints': (_envoy_config_endpoint_v3_LocalityLbEndpoints__Output)[]; /** * Load balancing policy settings. */ - 'policy'?: (_envoy_api_v2_ClusterLoadAssignment_Policy__Output); + 'policy': (_envoy_config_endpoint_v3_ClusterLoadAssignment_Policy__Output | null); /** * Map of named endpoints that can be referenced in LocalityLbEndpoints. 
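// --- Editorial sketch (not part of the generated diff) ---------------------
// A minimal ClusterLoadAssignment (EDS resource) against the v3 types in this
// hunk. The Locality/Address/FractionalPercent shapes come from their own
// generated files, not shown here, so treat this as a sketch; all concrete
// names and addresses are hypothetical.
import type { ClusterLoadAssignment } from './generated/envoy/config/endpoint/v3/ClusterLoadAssignment';

const assignment: ClusterLoadAssignment = {
  cluster_name: 'example-backend',
  endpoints: [
    {
      locality: { region: 'us-central1', zone: 'us-central1-a' },
      load_balancing_weight: { value: 100 },
      priority: 0,
      lb_endpoints: [
        {
          endpoint: {
            address: { socket_address: { address: '10.0.0.1', port_value: 8080 } },
          },
          health_status: 'HEALTHY',
          load_balancing_weight: { value: 1 },  // unspecified is now treated as 1
        },
      ],
    },
  ],
  policy: {
    // Drop 5% of traffic under a hypothetical "throttle" category.
    drop_overloads: [
      { category: 'throttle', drop_percentage: { numerator: 5, denominator: 'HUNDRED' } },
    ],
    overprovisioning_factor: { value: 140 },
  },
};
// ---------------------------------------------------------------------------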
* [#not-implemented-hide:] */ - 'named_endpoints'?: ({[key: string]: _envoy_api_v2_endpoint_Endpoint__Output}); + 'named_endpoints': ({[key: string]: _envoy_config_endpoint_v3_Endpoint__Output}); } diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/endpoint/ClusterStats.ts b/packages/grpc-js-xds/src/generated/envoy/config/endpoint/v3/ClusterStats.ts similarity index 65% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/endpoint/ClusterStats.ts rename to packages/grpc-js-xds/src/generated/envoy/config/endpoint/v3/ClusterStats.ts index 4b5c30f4c..d65383885 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/endpoint/ClusterStats.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/endpoint/v3/ClusterStats.ts @@ -1,10 +1,10 @@ -// Original file: deps/envoy-api/envoy/api/v2/endpoint/load_report.proto +// Original file: deps/envoy-api/envoy/config/endpoint/v3/load_report.proto -import type { UpstreamLocalityStats as _envoy_api_v2_endpoint_UpstreamLocalityStats, UpstreamLocalityStats__Output as _envoy_api_v2_endpoint_UpstreamLocalityStats__Output } from '../../../../envoy/api/v2/endpoint/UpstreamLocalityStats'; +import type { UpstreamLocalityStats as _envoy_config_endpoint_v3_UpstreamLocalityStats, UpstreamLocalityStats__Output as _envoy_config_endpoint_v3_UpstreamLocalityStats__Output } from '../../../../envoy/config/endpoint/v3/UpstreamLocalityStats'; import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../../google/protobuf/Duration'; import type { Long } from '@grpc/proto-loader'; -export interface _envoy_api_v2_endpoint_ClusterStats_DroppedRequests { +export interface _envoy_config_endpoint_v3_ClusterStats_DroppedRequests { /** * Identifier for the policy specifying the drop. */ @@ -15,7 +15,7 @@ export interface _envoy_api_v2_endpoint_ClusterStats_DroppedRequests { 'dropped_count'?: (number | string | Long); } -export interface _envoy_api_v2_endpoint_ClusterStats_DroppedRequests__Output { +export interface _envoy_config_endpoint_v3_ClusterStats_DroppedRequests__Output { /** * Identifier for the policy specifying the drop. */ @@ -28,8 +28,7 @@ export interface _envoy_api_v2_endpoint_ClusterStats_DroppedRequests__Output { /** * Per cluster load stats. Envoy reports these stats a management server in a - * :ref:`LoadStatsRequest` - * [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. + * :ref:`LoadStatsRequest` * Next ID: 7 * [#next-free-field: 7] */ @@ -41,7 +40,7 @@ export interface ClusterStats { /** * Need at least one. */ - 'upstream_locality_stats'?: (_envoy_api_v2_endpoint_UpstreamLocalityStats)[]; + 'upstream_locality_stats'?: (_envoy_config_endpoint_v3_UpstreamLocalityStats)[]; /** * Cluster-level stats such as total_successful_requests may be computed by * summing upstream_locality_stats. In addition, below there are additional @@ -53,16 +52,16 @@ export interface ClusterStats { 'total_dropped_requests'?: (number | string | Long); /** * Period over which the actual load report occurred. This will be guaranteed to include every - * request reported. Due to system load and delays between the *LoadStatsRequest* sent from Envoy - * and the *LoadStatsResponse* message sent from the management server, this may be longer than - * the requested load reporting interval in the *LoadStatsResponse*. + * request reported. 
Due to system load and delays between the ``LoadStatsRequest`` sent from Envoy + * and the ``LoadStatsResponse`` message sent from the management server, this may be longer than + * the requested load reporting interval in the ``LoadStatsResponse``. */ - 'load_report_interval'?: (_google_protobuf_Duration); + 'load_report_interval'?: (_google_protobuf_Duration | null); /** * Information about deliberately dropped requests for each category specified * in the DropOverload policy. */ - 'dropped_requests'?: (_envoy_api_v2_endpoint_ClusterStats_DroppedRequests)[]; + 'dropped_requests'?: (_envoy_config_endpoint_v3_ClusterStats_DroppedRequests)[]; /** * The eds_cluster_config service_name of the cluster. * It's possible that two clusters send the same service_name to EDS, @@ -73,8 +72,7 @@ export interface ClusterStats { /** * Per cluster load stats. Envoy reports these stats a management server in a - * :ref:`LoadStatsRequest` - * [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. + * :ref:`LoadStatsRequest` * Next ID: 7 * [#next-free-field: 7] */ @@ -86,7 +84,7 @@ export interface ClusterStats__Output { /** * Need at least one. */ - 'upstream_locality_stats': (_envoy_api_v2_endpoint_UpstreamLocalityStats__Output)[]; + 'upstream_locality_stats': (_envoy_config_endpoint_v3_UpstreamLocalityStats__Output)[]; /** * Cluster-level stats such as total_successful_requests may be computed by * summing upstream_locality_stats. In addition, below there are additional @@ -98,16 +96,16 @@ export interface ClusterStats__Output { 'total_dropped_requests': (string); /** * Period over which the actual load report occurred. This will be guaranteed to include every - * request reported. Due to system load and delays between the *LoadStatsRequest* sent from Envoy - * and the *LoadStatsResponse* message sent from the management server, this may be longer than - * the requested load reporting interval in the *LoadStatsResponse*. + * request reported. Due to system load and delays between the ``LoadStatsRequest`` sent from Envoy + * and the ``LoadStatsResponse`` message sent from the management server, this may be longer than + * the requested load reporting interval in the ``LoadStatsResponse``. */ - 'load_report_interval'?: (_google_protobuf_Duration__Output); + 'load_report_interval': (_google_protobuf_Duration__Output | null); /** * Information about deliberately dropped requests for each category specified * in the DropOverload policy. */ - 'dropped_requests': (_envoy_api_v2_endpoint_ClusterStats_DroppedRequests__Output)[]; + 'dropped_requests': (_envoy_config_endpoint_v3_ClusterStats_DroppedRequests__Output)[]; /** * The eds_cluster_config service_name of the cluster. 
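// --- Editorial sketch (not part of the generated diff) ---------------------
// A ClusterStats load report of the shape an LRS client would send. Field
// names such as cluster_service_name follow envoy/config/endpoint/v3/
// load_report.proto; the values and drop category are hypothetical.
import type { ClusterStats } from './generated/envoy/config/endpoint/v3/ClusterStats';

const clusterStats: ClusterStats = {
  cluster_name: 'example-backend',
  cluster_service_name: 'example-backend-eds',
  upstream_locality_stats: [
    {
      locality: { region: 'us-central1', zone: 'us-central1-a' },
      total_successful_requests: 980,
      total_error_requests: 20,
      total_requests_in_progress: 5,
    },
  ],
  total_dropped_requests: 12,
  dropped_requests: [{ category: 'throttle', dropped_count: 12 }],
  load_report_interval: { seconds: 10, nanos: 0 },  // google.protobuf.Duration
};
// ---------------------------------------------------------------------------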
* It's possible that two clusters send the same service_name to EDS, diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/endpoint/Endpoint.ts b/packages/grpc-js-xds/src/generated/envoy/config/endpoint/v3/Endpoint.ts similarity index 58% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/endpoint/Endpoint.ts rename to packages/grpc-js-xds/src/generated/envoy/config/endpoint/v3/Endpoint.ts index 68bef75e1..ef56d7068 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/endpoint/Endpoint.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/endpoint/v3/Endpoint.ts @@ -1,11 +1,11 @@ -// Original file: deps/envoy-api/envoy/api/v2/endpoint/endpoint_components.proto +// Original file: deps/envoy-api/envoy/config/endpoint/v3/endpoint_components.proto -import type { Address as _envoy_api_v2_core_Address, Address__Output as _envoy_api_v2_core_Address__Output } from '../../../../envoy/api/v2/core/Address'; +import type { Address as _envoy_config_core_v3_Address, Address__Output as _envoy_config_core_v3_Address__Output } from '../../../../envoy/config/core/v3/Address'; /** * The optional health check configuration. */ -export interface _envoy_api_v2_endpoint_Endpoint_HealthCheckConfig { +export interface _envoy_config_endpoint_v3_Endpoint_HealthCheckConfig { /** * Optional alternative health check port value. * @@ -17,18 +17,31 @@ export interface _envoy_api_v2_endpoint_Endpoint_HealthCheckConfig { 'port_value'?: (number); /** * By default, the host header for L7 health checks is controlled by cluster level configuration - * (see: :ref:`host ` and - * :ref:`authority `). Setting this + * (see: :ref:`host ` and + * :ref:`authority `). Setting this * to a non-empty value allows overriding the cluster level configuration for a specific * endpoint. */ 'hostname'?: (string); + /** + * Optional alternative health check host address. + * + * .. attention:: + * + * The form of the health check host address is expected to be a direct IP address. + */ + 'address'?: (_envoy_config_core_v3_Address | null); + /** + * Optional flag to control if perform active health check for this endpoint. + * Active health check is enabled by default if there is a health checker. + */ + 'disable_active_health_check'?: (boolean); } /** * The optional health check configuration. */ -export interface _envoy_api_v2_endpoint_Endpoint_HealthCheckConfig__Output { +export interface _envoy_config_endpoint_v3_Endpoint_HealthCheckConfig__Output { /** * Optional alternative health check port value. * @@ -40,12 +53,25 @@ export interface _envoy_api_v2_endpoint_Endpoint_HealthCheckConfig__Output { 'port_value': (number); /** * By default, the host header for L7 health checks is controlled by cluster level configuration - * (see: :ref:`host ` and - * :ref:`authority `). Setting this + * (see: :ref:`host ` and + * :ref:`authority `). Setting this * to a non-empty value allows overriding the cluster level configuration for a specific * endpoint. */ 'hostname': (string); + /** + * Optional alternative health check host address. + * + * .. attention:: + * + * The form of the health check host address is expected to be a direct IP address. + */ + 'address': (_envoy_config_core_v3_Address__Output | null); + /** + * Optional flag to control if perform active health check for this endpoint. + * Active health check is enabled by default if there is a health checker. 
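// --- Editorial sketch (not part of the generated diff) ---------------------
// An Endpoint that health-checks on an alternative port, using the address
// and disable_active_health_check fields added in this hunk. Addresses and
// the hostname are hypothetical.
import type { Endpoint } from './generated/envoy/config/endpoint/v3/Endpoint';

const backend: Endpoint = {
  address: { socket_address: { address: '10.0.0.2', port_value: 8080 } },
  health_check_config: {
    port_value: 9090,
    address: { socket_address: { address: '10.0.0.2', port_value: 9090 } },
    disable_active_health_check: false,
  },
  hostname: 'backend-2.internal',  // not used for routing or address resolution
};
// ---------------------------------------------------------------------------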
+ */ + 'disable_active_health_check': (boolean); } /** @@ -59,11 +85,11 @@ export interface Endpoint { * * The form of host address depends on the given cluster type. For STATIC or EDS, * it is expected to be a direct IP address (or something resolvable by the - * specified :ref:`resolver ` + * specified :ref:`resolver ` * in the Address). For LOGICAL or STRICT DNS, it is expected to be hostname, * and will be resolved via DNS. */ - 'address'?: (_envoy_api_v2_core_Address); + 'address'?: (_envoy_config_core_v3_Address | null); /** * The optional health check configuration is used as configuration for the * health checker to contact the health checked host. @@ -73,12 +99,12 @@ export interface Endpoint { * This takes into effect only for upstream clusters with * :ref:`active health checking ` enabled. */ - 'health_check_config'?: (_envoy_api_v2_endpoint_Endpoint_HealthCheckConfig); + 'health_check_config'?: (_envoy_config_endpoint_v3_Endpoint_HealthCheckConfig | null); /** * The hostname associated with this endpoint. This hostname is not used for routing or address * resolution. If provided, it will be associated with the endpoint, and can be used for features * that require a hostname, like - * :ref:`auto_host_rewrite `. + * :ref:`auto_host_rewrite `. */ 'hostname'?: (string); } @@ -94,11 +120,11 @@ export interface Endpoint__Output { * * The form of host address depends on the given cluster type. For STATIC or EDS, * it is expected to be a direct IP address (or something resolvable by the - * specified :ref:`resolver ` + * specified :ref:`resolver ` * in the Address). For LOGICAL or STRICT DNS, it is expected to be hostname, * and will be resolved via DNS. */ - 'address'?: (_envoy_api_v2_core_Address__Output); + 'address': (_envoy_config_core_v3_Address__Output | null); /** * The optional health check configuration is used as configuration for the * health checker to contact the health checked host. @@ -108,12 +134,12 @@ export interface Endpoint__Output { * This takes into effect only for upstream clusters with * :ref:`active health checking ` enabled. */ - 'health_check_config'?: (_envoy_api_v2_endpoint_Endpoint_HealthCheckConfig__Output); + 'health_check_config': (_envoy_config_endpoint_v3_Endpoint_HealthCheckConfig__Output | null); /** * The hostname associated with this endpoint. This hostname is not used for routing or address * resolution. If provided, it will be associated with the endpoint, and can be used for features * that require a hostname, like - * :ref:`auto_host_rewrite `. + * :ref:`auto_host_rewrite `. */ 'hostname': (string); } diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/endpoint/EndpointLoadMetricStats.ts b/packages/grpc-js-xds/src/generated/envoy/config/endpoint/v3/EndpointLoadMetricStats.ts similarity index 75% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/endpoint/EndpointLoadMetricStats.ts rename to packages/grpc-js-xds/src/generated/envoy/config/endpoint/v3/EndpointLoadMetricStats.ts index 9ed8016b7..50f63f4ca 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/endpoint/EndpointLoadMetricStats.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/endpoint/v3/EndpointLoadMetricStats.ts @@ -1,10 +1,7 @@ -// Original file: deps/envoy-api/envoy/api/v2/endpoint/load_report.proto +// Original file: deps/envoy-api/envoy/config/endpoint/v3/load_report.proto import type { Long } from '@grpc/proto-loader'; -/** - * [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. 
- */ export interface EndpointLoadMetricStats { /** * Name of the metric; may be empty. @@ -21,9 +18,6 @@ export interface EndpointLoadMetricStats { 'total_metric_value'?: (number | string); } -/** - * [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. - */ export interface EndpointLoadMetricStats__Output { /** * Name of the metric; may be empty. @@ -37,5 +31,5 @@ export interface EndpointLoadMetricStats__Output { * Sum of metric values across all calls that finished with this metric for * load_reporting_interval. */ - 'total_metric_value': (number | string); + 'total_metric_value': (number); } diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/endpoint/LbEndpoint.ts b/packages/grpc-js-xds/src/generated/envoy/config/endpoint/v3/LbEndpoint.ts similarity index 54% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/endpoint/LbEndpoint.ts rename to packages/grpc-js-xds/src/generated/envoy/config/endpoint/v3/LbEndpoint.ts index 1f8be9305..4130eb838 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/endpoint/LbEndpoint.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/endpoint/v3/LbEndpoint.ts @@ -1,8 +1,8 @@ -// Original file: deps/envoy-api/envoy/api/v2/endpoint/endpoint_components.proto +// Original file: deps/envoy-api/envoy/config/endpoint/v3/endpoint_components.proto -import type { Endpoint as _envoy_api_v2_endpoint_Endpoint, Endpoint__Output as _envoy_api_v2_endpoint_Endpoint__Output } from '../../../../envoy/api/v2/endpoint/Endpoint'; -import type { HealthStatus as _envoy_api_v2_core_HealthStatus } from '../../../../envoy/api/v2/core/HealthStatus'; -import type { Metadata as _envoy_api_v2_core_Metadata, Metadata__Output as _envoy_api_v2_core_Metadata__Output } from '../../../../envoy/api/v2/core/Metadata'; +import type { Endpoint as _envoy_config_endpoint_v3_Endpoint, Endpoint__Output as _envoy_config_endpoint_v3_Endpoint__Output } from '../../../../envoy/config/endpoint/v3/Endpoint'; +import type { HealthStatus as _envoy_config_core_v3_HealthStatus, HealthStatus__Output as _envoy_config_core_v3_HealthStatus__Output } from '../../../../envoy/config/core/v3/HealthStatus'; +import type { Metadata as _envoy_config_core_v3_Metadata, Metadata__Output as _envoy_config_core_v3_Metadata__Output } from '../../../../envoy/config/core/v3/Metadata'; import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; /** @@ -10,21 +10,21 @@ import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output a * [#next-free-field: 6] */ export interface LbEndpoint { - 'endpoint'?: (_envoy_api_v2_endpoint_Endpoint); + 'endpoint'?: (_envoy_config_endpoint_v3_Endpoint | null); /** * Optional health status when known and supplied by EDS server. */ - 'health_status'?: (_envoy_api_v2_core_HealthStatus | keyof typeof _envoy_api_v2_core_HealthStatus); + 'health_status'?: (_envoy_config_core_v3_HealthStatus); /** * The endpoint metadata specifies values that may be used by the load * balancer to select endpoints in a cluster for a given request. The filter - * name should be specified as *envoy.lb*. An example boolean key-value pair - * is *canary*, providing the optional canary status of the upstream host. + * name should be specified as ``envoy.lb``. An example boolean key-value pair + * is ``canary``, providing the optional canary status of the upstream host. 
* This may be matched against in a route's - * :ref:`RouteAction ` metadata_match field + * :ref:`RouteAction ` metadata_match field * to subset the endpoints considered in cluster load balancing. */ - 'metadata'?: (_envoy_api_v2_core_Metadata); + 'metadata'?: (_envoy_config_core_v3_Metadata | null); /** * The optional load balancing weight of the upstream host; at least 1. * Envoy uses the load balancing weight in some of the built in load @@ -32,11 +32,11 @@ export interface LbEndpoint { * of the weights of all endpoints in the endpoint's locality to produce a * percentage of traffic for the endpoint. This percentage is then further * weighted by the endpoint's locality's load balancing weight from - * LocalityLbEndpoints. If unspecified, each host is presumed to have equal - * weight in a locality. The sum of the weights of all endpoints in the - * endpoint's locality must not exceed uint32_t maximal value (4294967295). + * LocalityLbEndpoints. If unspecified, will be treated as 1. The sum + * of the weights of all endpoints in the endpoint's locality must not + * exceed uint32_t maximal value (4294967295). */ - 'load_balancing_weight'?: (_google_protobuf_UInt32Value); + 'load_balancing_weight'?: (_google_protobuf_UInt32Value | null); /** * [#not-implemented-hide:] */ @@ -52,21 +52,21 @@ export interface LbEndpoint { * [#next-free-field: 6] */ export interface LbEndpoint__Output { - 'endpoint'?: (_envoy_api_v2_endpoint_Endpoint__Output); + 'endpoint'?: (_envoy_config_endpoint_v3_Endpoint__Output | null); /** * Optional health status when known and supplied by EDS server. */ - 'health_status': (keyof typeof _envoy_api_v2_core_HealthStatus); + 'health_status': (_envoy_config_core_v3_HealthStatus__Output); /** * The endpoint metadata specifies values that may be used by the load * balancer to select endpoints in a cluster for a given request. The filter - * name should be specified as *envoy.lb*. An example boolean key-value pair - * is *canary*, providing the optional canary status of the upstream host. + * name should be specified as ``envoy.lb``. An example boolean key-value pair + * is ``canary``, providing the optional canary status of the upstream host. * This may be matched against in a route's - * :ref:`RouteAction ` metadata_match field + * :ref:`RouteAction ` metadata_match field * to subset the endpoints considered in cluster load balancing. */ - 'metadata'?: (_envoy_api_v2_core_Metadata__Output); + 'metadata': (_envoy_config_core_v3_Metadata__Output | null); /** * The optional load balancing weight of the upstream host; at least 1. * Envoy uses the load balancing weight in some of the built in load @@ -74,11 +74,11 @@ export interface LbEndpoint__Output { * of the weights of all endpoints in the endpoint's locality to produce a * percentage of traffic for the endpoint. This percentage is then further * weighted by the endpoint's locality's load balancing weight from - * LocalityLbEndpoints. If unspecified, each host is presumed to have equal - * weight in a locality. The sum of the weights of all endpoints in the - * endpoint's locality must not exceed uint32_t maximal value (4294967295). + * LocalityLbEndpoints. If unspecified, will be treated as 1. The sum + * of the weights of all endpoints in the endpoint's locality must not + * exceed uint32_t maximal value (4294967295). 
*/ - 'load_balancing_weight'?: (_google_protobuf_UInt32Value__Output); + 'load_balancing_weight': (_google_protobuf_UInt32Value__Output | null); /** * [#not-implemented-hide:] */ diff --git a/packages/grpc-js-xds/src/generated/envoy/config/endpoint/v3/LedsClusterLocalityConfig.ts b/packages/grpc-js-xds/src/generated/envoy/config/endpoint/v3/LedsClusterLocalityConfig.ts new file mode 100644 index 000000000..1229d33a3 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/endpoint/v3/LedsClusterLocalityConfig.ts @@ -0,0 +1,35 @@ +// Original file: deps/envoy-api/envoy/config/endpoint/v3/endpoint_components.proto + +import type { ConfigSource as _envoy_config_core_v3_ConfigSource, ConfigSource__Output as _envoy_config_core_v3_ConfigSource__Output } from '../../../../envoy/config/core/v3/ConfigSource'; + +/** + * [#not-implemented-hide:] + * A configuration for a LEDS collection. + */ +export interface LedsClusterLocalityConfig { + /** + * Configuration for the source of LEDS updates for a Locality. + */ + 'leds_config'?: (_envoy_config_core_v3_ConfigSource | null); + /** + * The xDS transport protocol glob collection resource name. + * The service is only supported in delta xDS (incremental) mode. + */ + 'leds_collection_name'?: (string); +} + +/** + * [#not-implemented-hide:] + * A configuration for a LEDS collection. + */ +export interface LedsClusterLocalityConfig__Output { + /** + * Configuration for the source of LEDS updates for a Locality. + */ + 'leds_config': (_envoy_config_core_v3_ConfigSource__Output | null); + /** + * The xDS transport protocol glob collection resource name. + * The service is only supported in delta xDS (incremental) mode. + */ + 'leds_collection_name': (string); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/endpoint/LocalityLbEndpoints.ts b/packages/grpc-js-xds/src/generated/envoy/config/endpoint/v3/LocalityLbEndpoints.ts similarity index 55% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/endpoint/LocalityLbEndpoints.ts rename to packages/grpc-js-xds/src/generated/envoy/config/endpoint/v3/LocalityLbEndpoints.ts index 557d97072..4540792d6 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/endpoint/LocalityLbEndpoints.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/endpoint/v3/LocalityLbEndpoints.ts @@ -1,25 +1,43 @@ -// Original file: deps/envoy-api/envoy/api/v2/endpoint/endpoint_components.proto +// Original file: deps/envoy-api/envoy/config/endpoint/v3/endpoint_components.proto -import type { Locality as _envoy_api_v2_core_Locality, Locality__Output as _envoy_api_v2_core_Locality__Output } from '../../../../envoy/api/v2/core/Locality'; -import type { LbEndpoint as _envoy_api_v2_endpoint_LbEndpoint, LbEndpoint__Output as _envoy_api_v2_endpoint_LbEndpoint__Output } from '../../../../envoy/api/v2/endpoint/LbEndpoint'; +import type { Locality as _envoy_config_core_v3_Locality, Locality__Output as _envoy_config_core_v3_Locality__Output } from '../../../../envoy/config/core/v3/Locality'; +import type { LbEndpoint as _envoy_config_endpoint_v3_LbEndpoint, LbEndpoint__Output as _envoy_config_endpoint_v3_LbEndpoint__Output } from '../../../../envoy/config/endpoint/v3/LbEndpoint'; import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; +import type { LedsClusterLocalityConfig as _envoy_config_endpoint_v3_LedsClusterLocalityConfig, LedsClusterLocalityConfig__Output as 
_envoy_config_endpoint_v3_LedsClusterLocalityConfig__Output } from '../../../../envoy/config/endpoint/v3/LedsClusterLocalityConfig'; + +/** + * [#not-implemented-hide:] + * A list of endpoints of a specific locality. + */ +export interface _envoy_config_endpoint_v3_LocalityLbEndpoints_LbEndpointList { + 'lb_endpoints'?: (_envoy_config_endpoint_v3_LbEndpoint)[]; +} + +/** + * [#not-implemented-hide:] + * A list of endpoints of a specific locality. + */ +export interface _envoy_config_endpoint_v3_LocalityLbEndpoints_LbEndpointList__Output { + 'lb_endpoints': (_envoy_config_endpoint_v3_LbEndpoint__Output)[]; +} /** * A group of endpoints belonging to a Locality. - * One can have multiple LocalityLbEndpoints for a locality, but this is - * generally only done if the different groups need to have different load - * balancing weights or different priorities. - * [#next-free-field: 7] + * One can have multiple LocalityLbEndpoints for a locality, but only if + * they have different priorities. + * [#next-free-field: 9] */ export interface LocalityLbEndpoints { /** * Identifies location of where the upstream hosts run. */ - 'locality'?: (_envoy_api_v2_core_Locality); + 'locality'?: (_envoy_config_core_v3_Locality | null); /** * The group of endpoints belonging to the locality specified. + * [#comment:TODO(adisuissa): Once LEDS is implemented this field needs to be + * deprecated and replaced by ``load_balancer_endpoints``.] */ - 'lb_endpoints'?: (_envoy_api_v2_endpoint_LbEndpoint)[]; + 'lb_endpoints'?: (_envoy_config_endpoint_v3_LbEndpoint)[]; /** * Optional: Per priority/region/zone/sub_zone weight; at least 1. The load * balancing weight for a locality is divided by the sum of the weights of all @@ -33,7 +51,7 @@ export interface LocalityLbEndpoints { * specified when locality weighted load balancing is enabled, the locality is * assigned no load. */ - 'load_balancing_weight'?: (_google_protobuf_UInt32Value); + 'load_balancing_weight'?: (_google_protobuf_UInt32Value | null); /** * Optional: the priority for this LocalityLbEndpoints. If unspecified this will * default to the highest priority (0). @@ -54,25 +72,40 @@ export interface LocalityLbEndpoints { * to determine where to route the requests. * [#not-implemented-hide:] */ - 'proximity'?: (_google_protobuf_UInt32Value); + 'proximity'?: (_google_protobuf_UInt32Value | null); + /** + * The group of endpoints belonging to the locality. + * [#comment:TODO(adisuissa): Once LEDS is implemented the ``lb_endpoints`` field + * needs to be deprecated.] + */ + 'load_balancer_endpoints'?: (_envoy_config_endpoint_v3_LocalityLbEndpoints_LbEndpointList | null); + /** + * LEDS Configuration for the current locality. + */ + 'leds_cluster_locality_config'?: (_envoy_config_endpoint_v3_LedsClusterLocalityConfig | null); + /** + * [#not-implemented-hide:] + */ + 'lb_config'?: "load_balancer_endpoints"|"leds_cluster_locality_config"; } /** * A group of endpoints belonging to a Locality. - * One can have multiple LocalityLbEndpoints for a locality, but this is - * generally only done if the different groups need to have different load - * balancing weights or different priorities. - * [#next-free-field: 7] + * One can have multiple LocalityLbEndpoints for a locality, but only if + * they have different priorities. + * [#next-free-field: 9] */ export interface LocalityLbEndpoints__Output { /** * Identifies location of where the upstream hosts run. 
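// --- Editorial sketch (not part of the generated diff) ---------------------
// The new lb_config oneof on LocalityLbEndpoints: a locality can point at a
// LEDS collection instead of listing lb_endpoints inline. Both alternatives
// are still marked [#not-implemented-hide:]; the collection name below is a
// hypothetical xdstp glob.
import type { LocalityLbEndpoints } from './generated/envoy/config/endpoint/v3/LocalityLbEndpoints';

const ledsLocality: LocalityLbEndpoints = {
  locality: { region: 'us-central1', zone: 'us-central1-a' },
  priority: 0,
  leds_cluster_locality_config: {
    leds_config: { ads: {} },  // deliver LEDS updates over the ADS stream
    leds_collection_name: 'xdstp://example.com/envoy.config.endpoint.v3.LbEndpoint/example-backend/*',
  },
};
// ---------------------------------------------------------------------------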
*/ - 'locality'?: (_envoy_api_v2_core_Locality__Output); + 'locality': (_envoy_config_core_v3_Locality__Output | null); /** * The group of endpoints belonging to the locality specified. + * [#comment:TODO(adisuissa): Once LEDS is implemented this field needs to be + * deprecated and replaced by ``load_balancer_endpoints``.] */ - 'lb_endpoints': (_envoy_api_v2_endpoint_LbEndpoint__Output)[]; + 'lb_endpoints': (_envoy_config_endpoint_v3_LbEndpoint__Output)[]; /** * Optional: Per priority/region/zone/sub_zone weight; at least 1. The load * balancing weight for a locality is divided by the sum of the weights of all @@ -86,7 +119,7 @@ export interface LocalityLbEndpoints__Output { * specified when locality weighted load balancing is enabled, the locality is * assigned no load. */ - 'load_balancing_weight'?: (_google_protobuf_UInt32Value__Output); + 'load_balancing_weight': (_google_protobuf_UInt32Value__Output | null); /** * Optional: the priority for this LocalityLbEndpoints. If unspecified this will * default to the highest priority (0). @@ -107,5 +140,19 @@ export interface LocalityLbEndpoints__Output { * to determine where to route the requests. * [#not-implemented-hide:] */ - 'proximity'?: (_google_protobuf_UInt32Value__Output); + 'proximity': (_google_protobuf_UInt32Value__Output | null); + /** + * The group of endpoints belonging to the locality. + * [#comment:TODO(adisuissa): Once LEDS is implemented the ``lb_endpoints`` field + * needs to be deprecated.] + */ + 'load_balancer_endpoints'?: (_envoy_config_endpoint_v3_LocalityLbEndpoints_LbEndpointList__Output | null); + /** + * LEDS Configuration for the current locality. + */ + 'leds_cluster_locality_config'?: (_envoy_config_endpoint_v3_LedsClusterLocalityConfig__Output | null); + /** + * [#not-implemented-hide:] + */ + 'lb_config': "load_balancer_endpoints"|"leds_cluster_locality_config"; } diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/endpoint/UpstreamEndpointStats.ts b/packages/grpc-js-xds/src/generated/envoy/config/endpoint/v3/UpstreamEndpointStats.ts similarity index 75% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/endpoint/UpstreamEndpointStats.ts rename to packages/grpc-js-xds/src/generated/envoy/config/endpoint/v3/UpstreamEndpointStats.ts index 5b5c62b32..ab06afb39 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/endpoint/UpstreamEndpointStats.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/endpoint/v3/UpstreamEndpointStats.ts @@ -1,19 +1,18 @@ -// Original file: deps/envoy-api/envoy/api/v2/endpoint/load_report.proto +// Original file: deps/envoy-api/envoy/config/endpoint/v3/load_report.proto -import type { Address as _envoy_api_v2_core_Address, Address__Output as _envoy_api_v2_core_Address__Output } from '../../../../envoy/api/v2/core/Address'; -import type { EndpointLoadMetricStats as _envoy_api_v2_endpoint_EndpointLoadMetricStats, EndpointLoadMetricStats__Output as _envoy_api_v2_endpoint_EndpointLoadMetricStats__Output } from '../../../../envoy/api/v2/endpoint/EndpointLoadMetricStats'; +import type { Address as _envoy_config_core_v3_Address, Address__Output as _envoy_config_core_v3_Address__Output } from '../../../../envoy/config/core/v3/Address'; +import type { EndpointLoadMetricStats as _envoy_config_endpoint_v3_EndpointLoadMetricStats, EndpointLoadMetricStats__Output as _envoy_config_endpoint_v3_EndpointLoadMetricStats__Output } from '../../../../envoy/config/endpoint/v3/EndpointLoadMetricStats'; import type { Struct as _google_protobuf_Struct, Struct__Output as 
_google_protobuf_Struct__Output } from '../../../../google/protobuf/Struct'; import type { Long } from '@grpc/proto-loader'; /** - * [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. * [#next-free-field: 8] */ export interface UpstreamEndpointStats { /** * Upstream host address. */ - 'address'?: (_envoy_api_v2_core_Address); + 'address'?: (_envoy_config_core_v3_Address | null); /** * The total number of requests successfully completed by the endpoints in the * locality. These include non-5xx responses for HTTP, where errors @@ -41,12 +40,12 @@ export interface UpstreamEndpointStats { /** * Stats for multi-dimensional load balancing. */ - 'load_metric_stats'?: (_envoy_api_v2_endpoint_EndpointLoadMetricStats)[]; + 'load_metric_stats'?: (_envoy_config_endpoint_v3_EndpointLoadMetricStats)[]; /** * Opaque and implementation dependent metadata of the * endpoint. Envoy will pass this directly to the management server. */ - 'metadata'?: (_google_protobuf_Struct); + 'metadata'?: (_google_protobuf_Struct | null); /** * The total number of requests that were issued to this endpoint * since the last report. A single TCP connection, HTTP or gRPC @@ -56,14 +55,13 @@ export interface UpstreamEndpointStats { } /** - * [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. * [#next-free-field: 8] */ export interface UpstreamEndpointStats__Output { /** * Upstream host address. */ - 'address'?: (_envoy_api_v2_core_Address__Output); + 'address': (_envoy_config_core_v3_Address__Output | null); /** * The total number of requests successfully completed by the endpoints in the * locality. These include non-5xx responses for HTTP, where errors @@ -91,12 +89,12 @@ export interface UpstreamEndpointStats__Output { /** * Stats for multi-dimensional load balancing. */ - 'load_metric_stats': (_envoy_api_v2_endpoint_EndpointLoadMetricStats__Output)[]; + 'load_metric_stats': (_envoy_config_endpoint_v3_EndpointLoadMetricStats__Output)[]; /** * Opaque and implementation dependent metadata of the * endpoint. Envoy will pass this directly to the management server. */ - 'metadata'?: (_google_protobuf_Struct__Output); + 'metadata': (_google_protobuf_Struct__Output | null); /** * The total number of requests that were issued to this endpoint * since the last report. 
A single TCP connection, HTTP or gRPC diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/endpoint/UpstreamLocalityStats.ts b/packages/grpc-js-xds/src/generated/envoy/config/endpoint/v3/UpstreamLocalityStats.ts similarity index 57% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/endpoint/UpstreamLocalityStats.ts rename to packages/grpc-js-xds/src/generated/envoy/config/endpoint/v3/UpstreamLocalityStats.ts index a1b20897e..fbfb05ed6 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/endpoint/UpstreamLocalityStats.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/endpoint/v3/UpstreamLocalityStats.ts @@ -1,16 +1,14 @@ -// Original file: deps/envoy-api/envoy/api/v2/endpoint/load_report.proto +// Original file: deps/envoy-api/envoy/config/endpoint/v3/load_report.proto -import type { Locality as _envoy_api_v2_core_Locality, Locality__Output as _envoy_api_v2_core_Locality__Output } from '../../../../envoy/api/v2/core/Locality'; -import type { EndpointLoadMetricStats as _envoy_api_v2_endpoint_EndpointLoadMetricStats, EndpointLoadMetricStats__Output as _envoy_api_v2_endpoint_EndpointLoadMetricStats__Output } from '../../../../envoy/api/v2/endpoint/EndpointLoadMetricStats'; -import type { UpstreamEndpointStats as _envoy_api_v2_endpoint_UpstreamEndpointStats, UpstreamEndpointStats__Output as _envoy_api_v2_endpoint_UpstreamEndpointStats__Output } from '../../../../envoy/api/v2/endpoint/UpstreamEndpointStats'; +import type { Locality as _envoy_config_core_v3_Locality, Locality__Output as _envoy_config_core_v3_Locality__Output } from '../../../../envoy/config/core/v3/Locality'; +import type { EndpointLoadMetricStats as _envoy_config_endpoint_v3_EndpointLoadMetricStats, EndpointLoadMetricStats__Output as _envoy_config_endpoint_v3_EndpointLoadMetricStats__Output } from '../../../../envoy/config/endpoint/v3/EndpointLoadMetricStats'; +import type { UpstreamEndpointStats as _envoy_config_endpoint_v3_UpstreamEndpointStats, UpstreamEndpointStats__Output as _envoy_config_endpoint_v3_UpstreamEndpointStats__Output } from '../../../../envoy/config/endpoint/v3/UpstreamEndpointStats'; import type { Long } from '@grpc/proto-loader'; /** - * These are stats Envoy reports to GLB every so often. Report frequency is - * defined by - * :ref:`LoadStatsResponse.load_reporting_interval`. + * These are stats Envoy reports to the management server at a frequency defined by + * :ref:`LoadStatsResponse.load_reporting_interval`. * Stats per upstream region/zone and optionally per subzone. - * [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. * [#next-free-field: 9] */ export interface UpstreamLocalityStats { @@ -18,7 +16,7 @@ export interface UpstreamLocalityStats { * Name of zone, region and optionally endpoint group these metrics were * collected from. Zone and region names could be empty if unknown. */ - 'locality'?: (_envoy_api_v2_core_Locality); + 'locality'?: (_envoy_config_core_v3_Locality | null); /** * The total number of requests successfully completed by the endpoints in the * locality. @@ -36,7 +34,7 @@ export interface UpstreamLocalityStats { /** * Stats for multi-dimensional load balancing. */ - 'load_metric_stats'?: (_envoy_api_v2_endpoint_EndpointLoadMetricStats)[]; + 'load_metric_stats'?: (_envoy_config_endpoint_v3_EndpointLoadMetricStats)[]; /** * [#not-implemented-hide:] The priority of the endpoint group these metrics * were collected from. 
@@ -45,9 +43,9 @@ export interface UpstreamLocalityStats { /** * Endpoint granularity stats information for this locality. This information * is populated if the Server requests it by setting - * :ref:`LoadStatsResponse.report_endpoint_granularity`. + * :ref:`LoadStatsResponse.report_endpoint_granularity`. */ - 'upstream_endpoint_stats'?: (_envoy_api_v2_endpoint_UpstreamEndpointStats)[]; + 'upstream_endpoint_stats'?: (_envoy_config_endpoint_v3_UpstreamEndpointStats)[]; /** * The total number of requests that were issued by this Envoy since * the last report. This information is aggregated over all the @@ -57,11 +55,9 @@ export interface UpstreamLocalityStats { } /** - * These are stats Envoy reports to GLB every so often. Report frequency is - * defined by - * :ref:`LoadStatsResponse.load_reporting_interval`. + * These are stats Envoy reports to the management server at a frequency defined by + * :ref:`LoadStatsResponse.load_reporting_interval`. * Stats per upstream region/zone and optionally per subzone. - * [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. * [#next-free-field: 9] */ export interface UpstreamLocalityStats__Output { @@ -69,7 +65,7 @@ export interface UpstreamLocalityStats__Output { * Name of zone, region and optionally endpoint group these metrics were * collected from. Zone and region names could be empty if unknown. */ - 'locality'?: (_envoy_api_v2_core_Locality__Output); + 'locality': (_envoy_config_core_v3_Locality__Output | null); /** * The total number of requests successfully completed by the endpoints in the * locality. @@ -87,7 +83,7 @@ export interface UpstreamLocalityStats__Output { /** * Stats for multi-dimensional load balancing. */ - 'load_metric_stats': (_envoy_api_v2_endpoint_EndpointLoadMetricStats__Output)[]; + 'load_metric_stats': (_envoy_config_endpoint_v3_EndpointLoadMetricStats__Output)[]; /** * [#not-implemented-hide:] The priority of the endpoint group these metrics * were collected from. @@ -96,9 +92,9 @@ export interface UpstreamLocalityStats__Output { /** * Endpoint granularity stats information for this locality. This information * is populated if the Server requests it by setting - * :ref:`LoadStatsResponse.report_endpoint_granularity`. + * :ref:`LoadStatsResponse.report_endpoint_granularity`. */ - 'upstream_endpoint_stats': (_envoy_api_v2_endpoint_UpstreamEndpointStats__Output)[]; + 'upstream_endpoint_stats': (_envoy_config_endpoint_v3_UpstreamEndpointStats__Output)[]; /** * The total number of requests that were issued by this Envoy since * the last report. 
This information is aggregated over all the diff --git a/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/AccessLog.ts b/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/AccessLog.ts deleted file mode 100644 index 82e056a4b..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/AccessLog.ts +++ /dev/null @@ -1,65 +0,0 @@ -// Original file: deps/envoy-api/envoy/config/filter/accesslog/v2/accesslog.proto - -import type { AccessLogFilter as _envoy_config_filter_accesslog_v2_AccessLogFilter, AccessLogFilter__Output as _envoy_config_filter_accesslog_v2_AccessLogFilter__Output } from '../../../../../envoy/config/filter/accesslog/v2/AccessLogFilter'; -import type { Struct as _google_protobuf_Struct, Struct__Output as _google_protobuf_Struct__Output } from '../../../../../google/protobuf/Struct'; -import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../../google/protobuf/Any'; - -export interface AccessLog { - /** - * The name of the access log implementation to instantiate. The name must - * match a statically registered access log. Current built-in loggers include: - * - * #. "envoy.access_loggers.file" - * #. "envoy.access_loggers.http_grpc" - * #. "envoy.access_loggers.tcp_grpc" - */ - 'name'?: (string); - /** - * Filter which is used to determine if the access log needs to be written. - */ - 'filter'?: (_envoy_config_filter_accesslog_v2_AccessLogFilter); - 'config'?: (_google_protobuf_Struct); - 'typed_config'?: (_google_protobuf_Any); - /** - * Custom configuration that depends on the access log being instantiated. Built-in - * configurations include: - * - * #. "envoy.access_loggers.file": :ref:`FileAccessLog - * ` - * #. "envoy.access_loggers.http_grpc": :ref:`HttpGrpcAccessLogConfig - * ` - * #. "envoy.access_loggers.tcp_grpc": :ref:`TcpGrpcAccessLogConfig - * ` - */ - 'config_type'?: "config"|"typed_config"; -} - -export interface AccessLog__Output { - /** - * The name of the access log implementation to instantiate. The name must - * match a statically registered access log. Current built-in loggers include: - * - * #. "envoy.access_loggers.file" - * #. "envoy.access_loggers.http_grpc" - * #. "envoy.access_loggers.tcp_grpc" - */ - 'name': (string); - /** - * Filter which is used to determine if the access log needs to be written. - */ - 'filter'?: (_envoy_config_filter_accesslog_v2_AccessLogFilter__Output); - 'config'?: (_google_protobuf_Struct__Output); - 'typed_config'?: (_google_protobuf_Any__Output); - /** - * Custom configuration that depends on the access log being instantiated. Built-in - * configurations include: - * - * #. "envoy.access_loggers.file": :ref:`FileAccessLog - * ` - * #. "envoy.access_loggers.http_grpc": :ref:`HttpGrpcAccessLogConfig - * ` - * #. 
"envoy.access_loggers.tcp_grpc": :ref:`TcpGrpcAccessLogConfig - * ` - */ - 'config_type': "config"|"typed_config"; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/AccessLogFilter.ts b/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/AccessLogFilter.ts deleted file mode 100644 index d75c9676c..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/AccessLogFilter.ts +++ /dev/null @@ -1,115 +0,0 @@ -// Original file: deps/envoy-api/envoy/config/filter/accesslog/v2/accesslog.proto - -import type { StatusCodeFilter as _envoy_config_filter_accesslog_v2_StatusCodeFilter, StatusCodeFilter__Output as _envoy_config_filter_accesslog_v2_StatusCodeFilter__Output } from '../../../../../envoy/config/filter/accesslog/v2/StatusCodeFilter'; -import type { DurationFilter as _envoy_config_filter_accesslog_v2_DurationFilter, DurationFilter__Output as _envoy_config_filter_accesslog_v2_DurationFilter__Output } from '../../../../../envoy/config/filter/accesslog/v2/DurationFilter'; -import type { NotHealthCheckFilter as _envoy_config_filter_accesslog_v2_NotHealthCheckFilter, NotHealthCheckFilter__Output as _envoy_config_filter_accesslog_v2_NotHealthCheckFilter__Output } from '../../../../../envoy/config/filter/accesslog/v2/NotHealthCheckFilter'; -import type { TraceableFilter as _envoy_config_filter_accesslog_v2_TraceableFilter, TraceableFilter__Output as _envoy_config_filter_accesslog_v2_TraceableFilter__Output } from '../../../../../envoy/config/filter/accesslog/v2/TraceableFilter'; -import type { RuntimeFilter as _envoy_config_filter_accesslog_v2_RuntimeFilter, RuntimeFilter__Output as _envoy_config_filter_accesslog_v2_RuntimeFilter__Output } from '../../../../../envoy/config/filter/accesslog/v2/RuntimeFilter'; -import type { AndFilter as _envoy_config_filter_accesslog_v2_AndFilter, AndFilter__Output as _envoy_config_filter_accesslog_v2_AndFilter__Output } from '../../../../../envoy/config/filter/accesslog/v2/AndFilter'; -import type { OrFilter as _envoy_config_filter_accesslog_v2_OrFilter, OrFilter__Output as _envoy_config_filter_accesslog_v2_OrFilter__Output } from '../../../../../envoy/config/filter/accesslog/v2/OrFilter'; -import type { HeaderFilter as _envoy_config_filter_accesslog_v2_HeaderFilter, HeaderFilter__Output as _envoy_config_filter_accesslog_v2_HeaderFilter__Output } from '../../../../../envoy/config/filter/accesslog/v2/HeaderFilter'; -import type { ResponseFlagFilter as _envoy_config_filter_accesslog_v2_ResponseFlagFilter, ResponseFlagFilter__Output as _envoy_config_filter_accesslog_v2_ResponseFlagFilter__Output } from '../../../../../envoy/config/filter/accesslog/v2/ResponseFlagFilter'; -import type { GrpcStatusFilter as _envoy_config_filter_accesslog_v2_GrpcStatusFilter, GrpcStatusFilter__Output as _envoy_config_filter_accesslog_v2_GrpcStatusFilter__Output } from '../../../../../envoy/config/filter/accesslog/v2/GrpcStatusFilter'; -import type { ExtensionFilter as _envoy_config_filter_accesslog_v2_ExtensionFilter, ExtensionFilter__Output as _envoy_config_filter_accesslog_v2_ExtensionFilter__Output } from '../../../../../envoy/config/filter/accesslog/v2/ExtensionFilter'; - -/** - * [#next-free-field: 12] - */ -export interface AccessLogFilter { - /** - * Status code filter. - */ - 'status_code_filter'?: (_envoy_config_filter_accesslog_v2_StatusCodeFilter); - /** - * Duration filter. - */ - 'duration_filter'?: (_envoy_config_filter_accesslog_v2_DurationFilter); - /** - * Not health check filter. 
- */ - 'not_health_check_filter'?: (_envoy_config_filter_accesslog_v2_NotHealthCheckFilter); - /** - * Traceable filter. - */ - 'traceable_filter'?: (_envoy_config_filter_accesslog_v2_TraceableFilter); - /** - * Runtime filter. - */ - 'runtime_filter'?: (_envoy_config_filter_accesslog_v2_RuntimeFilter); - /** - * And filter. - */ - 'and_filter'?: (_envoy_config_filter_accesslog_v2_AndFilter); - /** - * Or filter. - */ - 'or_filter'?: (_envoy_config_filter_accesslog_v2_OrFilter); - /** - * Header filter. - */ - 'header_filter'?: (_envoy_config_filter_accesslog_v2_HeaderFilter); - /** - * Response flag filter. - */ - 'response_flag_filter'?: (_envoy_config_filter_accesslog_v2_ResponseFlagFilter); - /** - * gRPC status filter. - */ - 'grpc_status_filter'?: (_envoy_config_filter_accesslog_v2_GrpcStatusFilter); - /** - * Extension filter. - */ - 'extension_filter'?: (_envoy_config_filter_accesslog_v2_ExtensionFilter); - 'filter_specifier'?: "status_code_filter"|"duration_filter"|"not_health_check_filter"|"traceable_filter"|"runtime_filter"|"and_filter"|"or_filter"|"header_filter"|"response_flag_filter"|"grpc_status_filter"|"extension_filter"; -} - -/** - * [#next-free-field: 12] - */ -export interface AccessLogFilter__Output { - /** - * Status code filter. - */ - 'status_code_filter'?: (_envoy_config_filter_accesslog_v2_StatusCodeFilter__Output); - /** - * Duration filter. - */ - 'duration_filter'?: (_envoy_config_filter_accesslog_v2_DurationFilter__Output); - /** - * Not health check filter. - */ - 'not_health_check_filter'?: (_envoy_config_filter_accesslog_v2_NotHealthCheckFilter__Output); - /** - * Traceable filter. - */ - 'traceable_filter'?: (_envoy_config_filter_accesslog_v2_TraceableFilter__Output); - /** - * Runtime filter. - */ - 'runtime_filter'?: (_envoy_config_filter_accesslog_v2_RuntimeFilter__Output); - /** - * And filter. - */ - 'and_filter'?: (_envoy_config_filter_accesslog_v2_AndFilter__Output); - /** - * Or filter. - */ - 'or_filter'?: (_envoy_config_filter_accesslog_v2_OrFilter__Output); - /** - * Header filter. - */ - 'header_filter'?: (_envoy_config_filter_accesslog_v2_HeaderFilter__Output); - /** - * Response flag filter. - */ - 'response_flag_filter'?: (_envoy_config_filter_accesslog_v2_ResponseFlagFilter__Output); - /** - * gRPC status filter. - */ - 'grpc_status_filter'?: (_envoy_config_filter_accesslog_v2_GrpcStatusFilter__Output); - /** - * Extension filter. 
- */ - 'extension_filter'?: (_envoy_config_filter_accesslog_v2_ExtensionFilter__Output); - 'filter_specifier': "status_code_filter"|"duration_filter"|"not_health_check_filter"|"traceable_filter"|"runtime_filter"|"and_filter"|"or_filter"|"header_filter"|"response_flag_filter"|"grpc_status_filter"|"extension_filter"; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/ComparisonFilter.ts b/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/ComparisonFilter.ts deleted file mode 100644 index 8989ba603..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/ComparisonFilter.ts +++ /dev/null @@ -1,48 +0,0 @@ -// Original file: deps/envoy-api/envoy/config/filter/accesslog/v2/accesslog.proto - -import type { RuntimeUInt32 as _envoy_api_v2_core_RuntimeUInt32, RuntimeUInt32__Output as _envoy_api_v2_core_RuntimeUInt32__Output } from '../../../../../envoy/api/v2/core/RuntimeUInt32'; - -// Original file: deps/envoy-api/envoy/config/filter/accesslog/v2/accesslog.proto - -export enum _envoy_config_filter_accesslog_v2_ComparisonFilter_Op { - /** - * = - */ - EQ = 0, - /** - * >= - */ - GE = 1, - /** - * <= - */ - LE = 2, -} - -/** - * Filter on an integer comparison. - */ -export interface ComparisonFilter { - /** - * Comparison operator. - */ - 'op'?: (_envoy_config_filter_accesslog_v2_ComparisonFilter_Op | keyof typeof _envoy_config_filter_accesslog_v2_ComparisonFilter_Op); - /** - * Value to compare against. - */ - 'value'?: (_envoy_api_v2_core_RuntimeUInt32); -} - -/** - * Filter on an integer comparison. - */ -export interface ComparisonFilter__Output { - /** - * Comparison operator. - */ - 'op': (keyof typeof _envoy_config_filter_accesslog_v2_ComparisonFilter_Op); - /** - * Value to compare against. - */ - 'value'?: (_envoy_api_v2_core_RuntimeUInt32__Output); -} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/DurationFilter.ts b/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/DurationFilter.ts deleted file mode 100644 index 52a37cd95..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/DurationFilter.ts +++ /dev/null @@ -1,23 +0,0 @@ -// Original file: deps/envoy-api/envoy/config/filter/accesslog/v2/accesslog.proto - -import type { ComparisonFilter as _envoy_config_filter_accesslog_v2_ComparisonFilter, ComparisonFilter__Output as _envoy_config_filter_accesslog_v2_ComparisonFilter__Output } from '../../../../../envoy/config/filter/accesslog/v2/ComparisonFilter'; - -/** - * Filters on total request duration in milliseconds. - */ -export interface DurationFilter { - /** - * Comparison. - */ - 'comparison'?: (_envoy_config_filter_accesslog_v2_ComparisonFilter); -} - -/** - * Filters on total request duration in milliseconds. - */ -export interface DurationFilter__Output { - /** - * Comparison. 
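For context on how the oneof unions in these generated interfaces are read: the string-valued discriminator (filter_specifier here, config_type above) names the member that is set on the wire, so consuming code can switch on it. A sketch, assuming the v3 replacement for this deleted v2 file keeps the same shape and generated path:

import type { AccessLogFilter__Output } from './envoy/config/accesslog/v3/AccessLogFilter';

// The descriptions returned here are placeholders; only two of the union's
// members are handled explicitly, for brevity.
function describeFilter(filter: AccessLogFilter__Output): string {
  switch (filter.filter_specifier) {
    case 'status_code_filter':
      return 'matches on HTTP status code';
    case 'duration_filter':
      return 'matches on total request duration';
    default:
      return `unhandled filter kind: ${filter.filter_specifier}`;
  }
}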
- */ - 'comparison'?: (_envoy_config_filter_accesslog_v2_ComparisonFilter__Output); -} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/GrpcStatusFilter.ts b/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/GrpcStatusFilter.ts deleted file mode 100644 index 8e8b0981e..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/GrpcStatusFilter.ts +++ /dev/null @@ -1,56 +0,0 @@ -// Original file: deps/envoy-api/envoy/config/filter/accesslog/v2/accesslog.proto - - -// Original file: deps/envoy-api/envoy/config/filter/accesslog/v2/accesslog.proto - -export enum _envoy_config_filter_accesslog_v2_GrpcStatusFilter_Status { - OK = 0, - CANCELED = 1, - UNKNOWN = 2, - INVALID_ARGUMENT = 3, - DEADLINE_EXCEEDED = 4, - NOT_FOUND = 5, - ALREADY_EXISTS = 6, - PERMISSION_DENIED = 7, - RESOURCE_EXHAUSTED = 8, - FAILED_PRECONDITION = 9, - ABORTED = 10, - OUT_OF_RANGE = 11, - UNIMPLEMENTED = 12, - INTERNAL = 13, - UNAVAILABLE = 14, - DATA_LOSS = 15, - UNAUTHENTICATED = 16, -} - -/** - * Filters gRPC requests based on their response status. If a gRPC status is not provided, the - * filter will infer the status from the HTTP status code. - */ -export interface GrpcStatusFilter { - /** - * Logs only responses that have any one of the gRPC statuses in this field. - */ - 'statuses'?: (_envoy_config_filter_accesslog_v2_GrpcStatusFilter_Status | keyof typeof _envoy_config_filter_accesslog_v2_GrpcStatusFilter_Status)[]; - /** - * If included and set to true, the filter will instead block all responses with a gRPC status or - * inferred gRPC status enumerated in statuses, and allow all other responses. - */ - 'exclude'?: (boolean); -} - -/** - * Filters gRPC requests based on their response status. If a gRPC status is not provided, the - * filter will infer the status from the HTTP status code. - */ -export interface GrpcStatusFilter__Output { - /** - * Logs only responses that have any one of the gRPC statuses in this field. - */ - 'statuses': (keyof typeof _envoy_config_filter_accesslog_v2_GrpcStatusFilter_Status)[]; - /** - * If included and set to true, the filter will instead block all responses with a gRPC status or - * inferred gRPC status enumerated in statuses, and allow all other responses. - */ - 'exclude': (boolean); -} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/HeaderFilter.ts b/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/HeaderFilter.ts deleted file mode 100644 index d40610617..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/HeaderFilter.ts +++ /dev/null @@ -1,25 +0,0 @@ -// Original file: deps/envoy-api/envoy/config/filter/accesslog/v2/accesslog.proto - -import type { HeaderMatcher as _envoy_api_v2_route_HeaderMatcher, HeaderMatcher__Output as _envoy_api_v2_route_HeaderMatcher__Output } from '../../../../../envoy/api/v2/route/HeaderMatcher'; - -/** - * Filters requests based on the presence or value of a request header. - */ -export interface HeaderFilter { - /** - * Only requests with a header which matches the specified HeaderMatcher will pass the filter - * check. - */ - 'header'?: (_envoy_api_v2_route_HeaderMatcher); -} - -/** - * Filters requests based on the presence or value of a request header. - */ -export interface HeaderFilter__Output { - /** - * Only requests with a header which matches the specified HeaderMatcher will pass the filter - * check. 
- */ - 'header'?: (_envoy_api_v2_route_HeaderMatcher__Output); -} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/RuntimeFilter.ts b/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/RuntimeFilter.ts deleted file mode 100644 index 100ce050b..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/RuntimeFilter.ts +++ /dev/null @@ -1,63 +0,0 @@ -// Original file: deps/envoy-api/envoy/config/filter/accesslog/v2/accesslog.proto - -import type { FractionalPercent as _envoy_type_FractionalPercent, FractionalPercent__Output as _envoy_type_FractionalPercent__Output } from '../../../../../envoy/type/FractionalPercent'; - -/** - * Filters for random sampling of requests. - */ -export interface RuntimeFilter { - /** - * Runtime key to get an optional overridden numerator for use in the *percent_sampled* field. - * If found in runtime, this value will replace the default numerator. - */ - 'runtime_key'?: (string); - /** - * The default sampling percentage. If not specified, defaults to 0% with denominator of 100. - */ - 'percent_sampled'?: (_envoy_type_FractionalPercent); - /** - * By default, sampling pivots on the header - * :ref:`x-request-id` being present. If - * :ref:`x-request-id` is present, the filter will - * consistently sample across multiple hosts based on the runtime key value and the value - * extracted from :ref:`x-request-id`. If it is - * missing, or *use_independent_randomness* is set to true, the filter will randomly sample based - * on the runtime key value alone. *use_independent_randomness* can be used for logging kill - * switches within complex nested :ref:`AndFilter - * ` and :ref:`OrFilter - * ` blocks that are easier to reason about - * from a probability perspective (i.e., setting to true will cause the filter to behave like - * an independent random variable when composed within logical operator filters). - */ - 'use_independent_randomness'?: (boolean); -} - -/** - * Filters for random sampling of requests. - */ -export interface RuntimeFilter__Output { - /** - * Runtime key to get an optional overridden numerator for use in the *percent_sampled* field. - * If found in runtime, this value will replace the default numerator. - */ - 'runtime_key': (string); - /** - * The default sampling percentage. If not specified, defaults to 0% with denominator of 100. - */ - 'percent_sampled'?: (_envoy_type_FractionalPercent__Output); - /** - * By default, sampling pivots on the header - * :ref:`x-request-id` being present. If - * :ref:`x-request-id` is present, the filter will - * consistently sample across multiple hosts based on the runtime key value and the value - * extracted from :ref:`x-request-id`. If it is - * missing, or *use_independent_randomness* is set to true, the filter will randomly sample based - * on the runtime key value alone. *use_independent_randomness* can be used for logging kill - * switches within complex nested :ref:`AndFilter - * ` and :ref:`OrFilter - * ` blocks that are easier to reason about - * from a probability perspective (i.e., setting to true will cause the filter to behave like - * an independent random variable when composed within logical operator filters). 
- */ - 'use_independent_randomness': (boolean); -} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/StatusCodeFilter.ts b/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/StatusCodeFilter.ts deleted file mode 100644 index d60a80a14..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/config/filter/accesslog/v2/StatusCodeFilter.ts +++ /dev/null @@ -1,23 +0,0 @@ -// Original file: deps/envoy-api/envoy/config/filter/accesslog/v2/accesslog.proto - -import type { ComparisonFilter as _envoy_config_filter_accesslog_v2_ComparisonFilter, ComparisonFilter__Output as _envoy_config_filter_accesslog_v2_ComparisonFilter__Output } from '../../../../../envoy/config/filter/accesslog/v2/ComparisonFilter'; - -/** - * Filters on HTTP response/status code. - */ -export interface StatusCodeFilter { - /** - * Comparison. - */ - 'comparison'?: (_envoy_config_filter_accesslog_v2_ComparisonFilter); -} - -/** - * Filters on HTTP response/status code. - */ -export interface StatusCodeFilter__Output { - /** - * Comparison. - */ - 'comparison'?: (_envoy_config_filter_accesslog_v2_ComparisonFilter__Output); -} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/filter/network/http_connection_manager/v2/HttpConnectionManager.ts b/packages/grpc-js-xds/src/generated/envoy/config/filter/network/http_connection_manager/v2/HttpConnectionManager.ts deleted file mode 100644 index 1838f89a7..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/config/filter/network/http_connection_manager/v2/HttpConnectionManager.ts +++ /dev/null @@ -1,1039 +0,0 @@ -// Original file: deps/envoy-api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto - -import type { Rds as _envoy_config_filter_network_http_connection_manager_v2_Rds, Rds__Output as _envoy_config_filter_network_http_connection_manager_v2_Rds__Output } from '../../../../../../envoy/config/filter/network/http_connection_manager/v2/Rds'; -import type { RouteConfiguration as _envoy_api_v2_RouteConfiguration, RouteConfiguration__Output as _envoy_api_v2_RouteConfiguration__Output } from '../../../../../../envoy/api/v2/RouteConfiguration'; -import type { HttpFilter as _envoy_config_filter_network_http_connection_manager_v2_HttpFilter, HttpFilter__Output as _envoy_config_filter_network_http_connection_manager_v2_HttpFilter__Output } from '../../../../../../envoy/config/filter/network/http_connection_manager/v2/HttpFilter'; -import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../../../../google/protobuf/BoolValue'; -import type { Http1ProtocolOptions as _envoy_api_v2_core_Http1ProtocolOptions, Http1ProtocolOptions__Output as _envoy_api_v2_core_Http1ProtocolOptions__Output } from '../../../../../../envoy/api/v2/core/Http1ProtocolOptions'; -import type { Http2ProtocolOptions as _envoy_api_v2_core_Http2ProtocolOptions, Http2ProtocolOptions__Output as _envoy_api_v2_core_Http2ProtocolOptions__Output } from '../../../../../../envoy/api/v2/core/Http2ProtocolOptions'; -import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../../../../google/protobuf/Duration'; -import type { AccessLog as _envoy_config_filter_accesslog_v2_AccessLog, AccessLog__Output as _envoy_config_filter_accesslog_v2_AccessLog__Output } from '../../../../../../envoy/config/filter/accesslog/v2/AccessLog'; -import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as 
_google_protobuf_UInt32Value__Output } from '../../../../../../google/protobuf/UInt32Value'; -import type { ScopedRoutes as _envoy_config_filter_network_http_connection_manager_v2_ScopedRoutes, ScopedRoutes__Output as _envoy_config_filter_network_http_connection_manager_v2_ScopedRoutes__Output } from '../../../../../../envoy/config/filter/network/http_connection_manager/v2/ScopedRoutes'; -import type { HttpProtocolOptions as _envoy_api_v2_core_HttpProtocolOptions, HttpProtocolOptions__Output as _envoy_api_v2_core_HttpProtocolOptions__Output } from '../../../../../../envoy/api/v2/core/HttpProtocolOptions'; -import type { RequestIDExtension as _envoy_config_filter_network_http_connection_manager_v2_RequestIDExtension, RequestIDExtension__Output as _envoy_config_filter_network_http_connection_manager_v2_RequestIDExtension__Output } from '../../../../../../envoy/config/filter/network/http_connection_manager/v2/RequestIDExtension'; -import type { Percent as _envoy_type_Percent, Percent__Output as _envoy_type_Percent__Output } from '../../../../../../envoy/type/Percent'; -import type { CustomTag as _envoy_type_tracing_v2_CustomTag, CustomTag__Output as _envoy_type_tracing_v2_CustomTag__Output } from '../../../../../../envoy/type/tracing/v2/CustomTag'; -import type { _envoy_config_trace_v2_Tracing_Http, _envoy_config_trace_v2_Tracing_Http__Output } from '../../../../../../envoy/config/trace/v2/Tracing'; - -// Original file: deps/envoy-api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto - -export enum _envoy_config_filter_network_http_connection_manager_v2_HttpConnectionManager_CodecType { - /** - * For every new connection, the connection manager will determine which - * codec to use. This mode supports both ALPN for TLS listeners as well as - * protocol inference for plaintext listeners. If ALPN data is available, it - * is preferred, otherwise protocol inference is used. In almost all cases, - * this is the right option to choose for this setting. - */ - AUTO = 0, - /** - * The connection manager will assume that the client is speaking HTTP/1.1. - */ - HTTP1 = 1, - /** - * The connection manager will assume that the client is speaking HTTP/2 - * (Envoy does not require HTTP/2 to take place over TLS or to use ALPN. - * Prior knowledge is allowed). - */ - HTTP2 = 2, - /** - * [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with - * caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient - * to distinguish HTTP1 and HTTP2 traffic. - */ - HTTP3 = 3, -} - -// Original file: deps/envoy-api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto - -/** - * How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP - * header. - */ -export enum _envoy_config_filter_network_http_connection_manager_v2_HttpConnectionManager_ForwardClientCertDetails { - /** - * Do not send the XFCC header to the next hop. This is the default value. - */ - SANITIZE = 0, - /** - * When the client connection is mTLS (Mutual TLS), forward the XFCC header - * in the request. - */ - FORWARD_ONLY = 1, - /** - * When the client connection is mTLS, append the client certificate - * information to the request’s XFCC header and forward it. - */ - APPEND_FORWARD = 2, - /** - * When the client connection is mTLS, reset the XFCC header with the client - * certificate information and send it to the next hop. 
- */ - SANITIZE_SET = 3, - /** - * Always forward the XFCC header in the request, regardless of whether the - * client connection is mTLS. - */ - ALWAYS_FORWARD_ONLY = 4, -} - -export interface _envoy_config_filter_network_http_connection_manager_v2_HttpConnectionManager_InternalAddressConfig { - /** - * Whether unix socket addresses should be considered internal. - */ - 'unix_sockets'?: (boolean); -} - -export interface _envoy_config_filter_network_http_connection_manager_v2_HttpConnectionManager_InternalAddressConfig__Output { - /** - * Whether unix socket addresses should be considered internal. - */ - 'unix_sockets': (boolean); -} - -// Original file: deps/envoy-api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto - -export enum _envoy_config_filter_network_http_connection_manager_v2_HttpConnectionManager_Tracing_OperationName { - /** - * The HTTP listener is used for ingress/incoming requests. - */ - INGRESS = 0, - /** - * The HTTP listener is used for egress/outgoing requests. - */ - EGRESS = 1, -} - -// Original file: deps/envoy-api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto - -export enum _envoy_config_filter_network_http_connection_manager_v2_HttpConnectionManager_ServerHeaderTransformation { - /** - * Overwrite any Server header with the contents of server_name. - */ - OVERWRITE = 0, - /** - * If no Server header is present, append Server server_name - * If a Server header is present, pass it through. - */ - APPEND_IF_ABSENT = 1, - /** - * Pass through the value of the server header, and do not append a header - * if none is present. - */ - PASS_THROUGH = 2, -} - -/** - * [#next-free-field: 7] - */ -export interface _envoy_config_filter_network_http_connection_manager_v2_HttpConnectionManager_SetCurrentClientCertDetails { - /** - * Whether to forward the subject of the client cert. Defaults to false. - */ - 'subject'?: (_google_protobuf_BoolValue); - /** - * Whether to forward the entire client cert in URL encoded PEM format. This will appear in the - * XFCC header comma separated from other values with the value Cert="PEM". - * Defaults to false. - */ - 'cert'?: (boolean); - /** - * Whether to forward the entire client cert chain (including the leaf cert) in URL encoded PEM - * format. This will appear in the XFCC header comma separated from other values with the value - * Chain="PEM". - * Defaults to false. - */ - 'chain'?: (boolean); - /** - * Whether to forward the DNS type Subject Alternative Names of the client cert. - * Defaults to false. - */ - 'dns'?: (boolean); - /** - * Whether to forward the URI type Subject Alternative Name of the client cert. Defaults to - * false. - */ - 'uri'?: (boolean); -} - -/** - * [#next-free-field: 7] - */ -export interface _envoy_config_filter_network_http_connection_manager_v2_HttpConnectionManager_SetCurrentClientCertDetails__Output { - /** - * Whether to forward the subject of the client cert. Defaults to false. - */ - 'subject'?: (_google_protobuf_BoolValue__Output); - /** - * Whether to forward the entire client cert in URL encoded PEM format. This will appear in the - * XFCC header comma separated from other values with the value Cert="PEM". - * Defaults to false. - */ - 'cert': (boolean); - /** - * Whether to forward the entire client cert chain (including the leaf cert) in URL encoded PEM - * format. This will appear in the XFCC header comma separated from other values with the value - * Chain="PEM". - * Defaults to false. 
- */ - 'chain': (boolean); - /** - * Whether to forward the DNS type Subject Alternative Names of the client cert. - * Defaults to false. - */ - 'dns': (boolean); - /** - * Whether to forward the URI type Subject Alternative Name of the client cert. Defaults to - * false. - */ - 'uri': (boolean); -} - -/** - * [#next-free-field: 10] - */ -export interface _envoy_config_filter_network_http_connection_manager_v2_HttpConnectionManager_Tracing { - /** - * The span name will be derived from this field. If - * :ref:`traffic_direction ` is - * specified on the parent listener, then it is used instead of this field. - * - * .. attention:: - * This field has been deprecated in favor of `traffic_direction`. - */ - 'operation_name'?: (_envoy_config_filter_network_http_connection_manager_v2_HttpConnectionManager_Tracing_OperationName | keyof typeof _envoy_config_filter_network_http_connection_manager_v2_HttpConnectionManager_Tracing_OperationName); - /** - * A list of header names used to create tags for the active span. The header name is used to - * populate the tag name, and the header value is used to populate the tag value. The tag is - * created if the specified header name is present in the request's headers. - * - * .. attention:: - * This field has been deprecated in favor of :ref:`custom_tags - * `. - */ - 'request_headers_for_tags'?: (string)[]; - /** - * Target percentage of requests managed by this HTTP connection manager that will be force - * traced if the :ref:`x-client-trace-id ` - * header is set. This field is a direct analog for the runtime variable - * 'tracing.client_sampling' in the :ref:`HTTP Connection Manager - * `. - * Default: 100% - */ - 'client_sampling'?: (_envoy_type_Percent); - /** - * Target percentage of requests managed by this HTTP connection manager that will be randomly - * selected for trace generation, if not requested by the client or not forced. This field is - * a direct analog for the runtime variable 'tracing.random_sampling' in the - * :ref:`HTTP Connection Manager `. - * Default: 100% - */ - 'random_sampling'?: (_envoy_type_Percent); - /** - * Target percentage of requests managed by this HTTP connection manager that will be traced - * after all other sampling checks have been applied (client-directed, force tracing, random - * sampling). This field functions as an upper limit on the total configured sampling rate. For - * instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1% - * of client requests with the appropriate headers to be force traced. This field is a direct - * analog for the runtime variable 'tracing.global_enabled' in the - * :ref:`HTTP Connection Manager `. - * Default: 100% - */ - 'overall_sampling'?: (_envoy_type_Percent); - /** - * Whether to annotate spans with additional data. If true, spans will include logs for stream - * events. - */ - 'verbose'?: (boolean); - /** - * Maximum length of the request path to extract and include in the HttpUrl tag. Used to - * truncate lengthy request paths to meet the needs of a tracing backend. - * Default: 256 - */ - 'max_path_tag_length'?: (_google_protobuf_UInt32Value); - /** - * A list of custom tags with unique tag name to create tags for the active span. - */ - 'custom_tags'?: (_envoy_type_tracing_v2_CustomTag)[]; - /** - * Configuration for an external tracing provider. - * If not specified, no tracing will be performed. - * - * .. attention:: - * Please be aware that *envoy.tracers.opencensus* provider can only be configured once - * in Envoy lifetime. 
- * Any attempts to reconfigure it or to use different configurations for different HCM filters - * will be rejected. - * Such a constraint is inherent to OpenCensus itself. It cannot be overcome without changes - * on OpenCensus side. - */ - 'provider'?: (_envoy_config_trace_v2_Tracing_Http); -} - -/** - * [#next-free-field: 10] - */ -export interface _envoy_config_filter_network_http_connection_manager_v2_HttpConnectionManager_Tracing__Output { - /** - * The span name will be derived from this field. If - * :ref:`traffic_direction ` is - * specified on the parent listener, then it is used instead of this field. - * - * .. attention:: - * This field has been deprecated in favor of `traffic_direction`. - */ - 'operation_name': (keyof typeof _envoy_config_filter_network_http_connection_manager_v2_HttpConnectionManager_Tracing_OperationName); - /** - * A list of header names used to create tags for the active span. The header name is used to - * populate the tag name, and the header value is used to populate the tag value. The tag is - * created if the specified header name is present in the request's headers. - * - * .. attention:: - * This field has been deprecated in favor of :ref:`custom_tags - * `. - */ - 'request_headers_for_tags': (string)[]; - /** - * Target percentage of requests managed by this HTTP connection manager that will be force - * traced if the :ref:`x-client-trace-id ` - * header is set. This field is a direct analog for the runtime variable - * 'tracing.client_sampling' in the :ref:`HTTP Connection Manager - * `. - * Default: 100% - */ - 'client_sampling'?: (_envoy_type_Percent__Output); - /** - * Target percentage of requests managed by this HTTP connection manager that will be randomly - * selected for trace generation, if not requested by the client or not forced. This field is - * a direct analog for the runtime variable 'tracing.random_sampling' in the - * :ref:`HTTP Connection Manager `. - * Default: 100% - */ - 'random_sampling'?: (_envoy_type_Percent__Output); - /** - * Target percentage of requests managed by this HTTP connection manager that will be traced - * after all other sampling checks have been applied (client-directed, force tracing, random - * sampling). This field functions as an upper limit on the total configured sampling rate. For - * instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1% - * of client requests with the appropriate headers to be force traced. This field is a direct - * analog for the runtime variable 'tracing.global_enabled' in the - * :ref:`HTTP Connection Manager `. - * Default: 100% - */ - 'overall_sampling'?: (_envoy_type_Percent__Output); - /** - * Whether to annotate spans with additional data. If true, spans will include logs for stream - * events. - */ - 'verbose': (boolean); - /** - * Maximum length of the request path to extract and include in the HttpUrl tag. Used to - * truncate lengthy request paths to meet the needs of a tracing backend. - * Default: 256 - */ - 'max_path_tag_length'?: (_google_protobuf_UInt32Value__Output); - /** - * A list of custom tags with unique tag name to create tags for the active span. - */ - 'custom_tags': (_envoy_type_tracing_v2_CustomTag__Output)[]; - /** - * Configuration for an external tracing provider. - * If not specified, no tracing will be performed. - * - * .. attention:: - * Please be aware that *envoy.tracers.opencensus* provider can only be configured once - * in Envoy lifetime. 
- * Any attempts to reconfigure it or to use different configurations for different HCM filters - * will be rejected. - * Such a constraint is inherent to OpenCensus itself. It cannot be overcome without changes - * on OpenCensus side. - */ - 'provider'?: (_envoy_config_trace_v2_Tracing_Http__Output); -} - -/** - * The configuration for HTTP upgrades. - * For each upgrade type desired, an UpgradeConfig must be added. - * - * .. warning:: - * - * The current implementation of upgrade headers does not handle - * multi-valued upgrade headers. Support for multi-valued headers may be - * added in the future if needed. - * - * .. warning:: - * The current implementation of upgrade headers does not work with HTTP/2 - * upstreams. - */ -export interface _envoy_config_filter_network_http_connection_manager_v2_HttpConnectionManager_UpgradeConfig { - /** - * The case-insensitive name of this upgrade, e.g. "websocket". - * For each upgrade type present in upgrade_configs, requests with - * Upgrade: [upgrade_type] - * will be proxied upstream. - */ - 'upgrade_type'?: (string); - /** - * If present, this represents the filter chain which will be created for - * this type of upgrade. If no filters are present, the filter chain for - * HTTP connections will be used for this upgrade type. - */ - 'filters'?: (_envoy_config_filter_network_http_connection_manager_v2_HttpFilter)[]; - /** - * Determines if upgrades are enabled or disabled by default. Defaults to true. - * This can be overridden on a per-route basis with :ref:`cluster - * ` as documented in the - * :ref:`upgrade documentation `. - */ - 'enabled'?: (_google_protobuf_BoolValue); -} - -/** - * The configuration for HTTP upgrades. - * For each upgrade type desired, an UpgradeConfig must be added. - * - * .. warning:: - * - * The current implementation of upgrade headers does not handle - * multi-valued upgrade headers. Support for multi-valued headers may be - * added in the future if needed. - * - * .. warning:: - * The current implementation of upgrade headers does not work with HTTP/2 - * upstreams. - */ -export interface _envoy_config_filter_network_http_connection_manager_v2_HttpConnectionManager_UpgradeConfig__Output { - /** - * The case-insensitive name of this upgrade, e.g. "websocket". - * For each upgrade type present in upgrade_configs, requests with - * Upgrade: [upgrade_type] - * will be proxied upstream. - */ - 'upgrade_type': (string); - /** - * If present, this represents the filter chain which will be created for - * this type of upgrade. If no filters are present, the filter chain for - * HTTP connections will be used for this upgrade type. - */ - 'filters': (_envoy_config_filter_network_http_connection_manager_v2_HttpFilter__Output)[]; - /** - * Determines if upgrades are enabled or disabled by default. Defaults to true. - * This can be overridden on a per-route basis with :ref:`cluster - * ` as documented in the - * :ref:`upgrade documentation `. - */ - 'enabled'?: (_google_protobuf_BoolValue__Output); -} - -/** - * [#next-free-field: 37] - */ -export interface HttpConnectionManager { - /** - * Supplies the type of codec that the connection manager should use. - */ - 'codec_type'?: (_envoy_config_filter_network_http_connection_manager_v2_HttpConnectionManager_CodecType | keyof typeof _envoy_config_filter_network_http_connection_manager_v2_HttpConnectionManager_CodecType); - /** - * The human readable prefix to use when emitting statistics for the - * connection manager. 
See the :ref:`statistics documentation ` for - * more information. - */ - 'stat_prefix'?: (string); - /** - * The connection manager’s route table will be dynamically loaded via the RDS API. - */ - 'rds'?: (_envoy_config_filter_network_http_connection_manager_v2_Rds); - /** - * The route table for the connection manager is static and is specified in this property. - */ - 'route_config'?: (_envoy_api_v2_RouteConfiguration); - /** - * A list of individual HTTP filters that make up the filter chain for - * requests made to the connection manager. :ref:`Order matters ` - * as the filters are processed sequentially as request events happen. - */ - 'http_filters'?: (_envoy_config_filter_network_http_connection_manager_v2_HttpFilter)[]; - /** - * Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` - * and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See the linked - * documentation for more information. Defaults to false. - */ - 'add_user_agent'?: (_google_protobuf_BoolValue); - /** - * Presence of the object defines whether the connection manager - * emits :ref:`tracing ` data to the :ref:`configured tracing provider - * `. - */ - 'tracing'?: (_envoy_config_filter_network_http_connection_manager_v2_HttpConnectionManager_Tracing); - /** - * Additional HTTP/1 settings that are passed to the HTTP/1 codec. - */ - 'http_protocol_options'?: (_envoy_api_v2_core_Http1ProtocolOptions); - /** - * Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. - */ - 'http2_protocol_options'?: (_envoy_api_v2_core_Http2ProtocolOptions); - /** - * An optional override that the connection manager will write to the server - * header in responses. If not set, the default is *envoy*. - */ - 'server_name'?: (string); - /** - * The idle timeout for connections managed by the connection manager. The - * idle timeout is defined as the period in which there are no active - * requests. If not set, there is no idle timeout. When the idle timeout is - * reached the connection will be closed. If the connection is an HTTP/2 - * connection a drain sequence will occur prior to closing the connection. - * This field is deprecated. Use :ref:`idle_timeout - * ` - * instead. - */ - 'idle_timeout'?: (_google_protobuf_Duration); - /** - * The time that Envoy will wait between sending an HTTP/2 “shutdown - * notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame. - * This is used so that Envoy provides a grace period for new streams that - * race with the final GOAWAY frame. During this grace period, Envoy will - * continue to accept new streams. After the grace period, a final GOAWAY - * frame is sent and Envoy will start refusing new streams. Draining occurs - * both when a connection hits the idle timeout or during general server - * draining. The default grace period is 5000 milliseconds (5 seconds) if this - * option is not specified. - */ - 'drain_timeout'?: (_google_protobuf_Duration); - /** - * Configuration for :ref:`HTTP access logs ` - * emitted by the connection manager. - */ - 'access_log'?: (_envoy_config_filter_accesslog_v2_AccessLog)[]; - /** - * If set to true, the connection manager will use the real remote address - * of the client connection when determining internal versus external origin and manipulating - * various headers. If set to false or absent, the connection manager will use the - * :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. 
See the documentation for - * :ref:`config_http_conn_man_headers_x-forwarded-for`, - * :ref:`config_http_conn_man_headers_x-envoy-internal`, and - * :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information. - */ - 'use_remote_address'?: (_google_protobuf_BoolValue); - /** - * Whether the connection manager will generate the :ref:`x-request-id - * ` header if it does not exist. This defaults to - * true. Generating a random UUID4 is expensive so in high throughput scenarios where this feature - * is not desired it can be disabled. - */ - 'generate_request_id'?: (_google_protobuf_BoolValue); - /** - * How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP - * header. - */ - 'forward_client_cert_details'?: (_envoy_config_filter_network_http_connection_manager_v2_HttpConnectionManager_ForwardClientCertDetails | keyof typeof _envoy_config_filter_network_http_connection_manager_v2_HttpConnectionManager_ForwardClientCertDetails); - /** - * This field is valid only when :ref:`forward_client_cert_details - * ` - * is APPEND_FORWARD or SANITIZE_SET and the client connection is mTLS. It specifies the fields in - * the client certificate to be forwarded. Note that in the - * :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, *Hash* is always set, and - * *By* is always set when the client certificate presents the URI type Subject Alternative Name - * value. - */ - 'set_current_client_cert_details'?: (_envoy_config_filter_network_http_connection_manager_v2_HttpConnectionManager_SetCurrentClientCertDetails); - /** - * If proxy_100_continue is true, Envoy will proxy incoming "Expect: - * 100-continue" headers upstream, and forward "100 Continue" responses - * downstream. If this is false or not set, Envoy will instead strip the - * "Expect: 100-continue" header, and send a "100 Continue" response itself. - */ - 'proxy_100_continue'?: (boolean); - /** - * The number of additional ingress proxy hops from the right side of the - * :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when - * determining the origin client's IP address. The default is zero if this option - * is not specified. See the documentation for - * :ref:`config_http_conn_man_headers_x-forwarded-for` for more information. - */ - 'xff_num_trusted_hops'?: (number); - /** - * If - * :ref:`use_remote_address - * ` - * is true and represent_ipv4_remote_address_as_ipv4_mapped_ipv6 is true and the remote address is - * an IPv4 address, the address will be mapped to IPv6 before it is appended to *x-forwarded-for*. - * This is useful for testing compatibility of upstream services that parse the header value. For - * example, 50.0.0.1 is represented as ::FFFF:50.0.0.1. See `IPv4-Mapped IPv6 Addresses - * `_ for details. This will also affect the - * :ref:`config_http_conn_man_headers_x-envoy-external-address` header. See - * :ref:`http_connection_manager.represent_ipv4_remote_address_as_ipv4_mapped_ipv6 - * ` for runtime - * control. - * [#not-implemented-hide:] - */ - 'represent_ipv4_remote_address_as_ipv4_mapped_ipv6'?: (boolean); - /** - * If set, Envoy will not append the remote address to the - * :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. This may be used in - * conjunction with HTTP filters that explicitly manipulate XFF after the HTTP connection manager - * has mutated the request headers. 
While :ref:`use_remote_address - * ` - * will also suppress XFF addition, it has consequences for logging and other - * Envoy uses of the remote address, so *skip_xff_append* should be used - * when only an elision of XFF addition is intended. - */ - 'skip_xff_append'?: (boolean); - /** - * Via header value to append to request and response headers. If this is - * empty, no via header will be appended. - */ - 'via'?: (string); - 'upgrade_configs'?: (_envoy_config_filter_network_http_connection_manager_v2_HttpConnectionManager_UpgradeConfig)[]; - /** - * The stream idle timeout for connections managed by the connection manager. - * If not specified, this defaults to 5 minutes. The default value was selected - * so as not to interfere with any smaller configured timeouts that may have - * existed in configurations prior to the introduction of this feature, while - * introducing robustness to TCP connections that terminate without a FIN. - * - * This idle timeout applies to new streams and is overridable by the - * :ref:`route-level idle_timeout - * `. Even on a stream in - * which the override applies, prior to receipt of the initial request - * headers, the :ref:`stream_idle_timeout - * ` - * applies. Each time an encode/decode event for headers or data is processed - * for the stream, the timer will be reset. If the timeout fires, the stream - * is terminated with a 408 Request Timeout error code if no upstream response - * header has been received, otherwise a stream reset occurs. - * - * Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due - * to the granularity of events presented to the connection manager. For example, while receiving - * very large request headers, it may be the case that there is traffic regularly arriving on the - * wire while the connection manage is only able to observe the end-of-headers event, hence the - * stream may still idle timeout. - * - * A value of 0 will completely disable the connection manager stream idle - * timeout, although per-route idle timeout overrides will continue to apply. - */ - 'stream_idle_timeout'?: (_google_protobuf_Duration); - /** - * Configures what network addresses are considered internal for stats and header sanitation - * purposes. If unspecified, only RFC1918 IP addresses will be considered internal. - * See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more - * information about internal/external addresses. - */ - 'internal_address_config'?: (_envoy_config_filter_network_http_connection_manager_v2_HttpConnectionManager_InternalAddressConfig); - /** - * The delayed close timeout is for downstream connections managed by the HTTP connection manager. - * It is defined as a grace period after connection close processing has been locally initiated - * during which Envoy will wait for the peer to close (i.e., a TCP FIN/RST is received by Envoy - * from the downstream connection) prior to Envoy closing the socket associated with that - * connection. - * NOTE: This timeout is enforced even when the socket associated with the downstream connection - * is pending a flush of the write buffer. However, any progress made writing data to the socket - * will restart the timer associated with this timeout. This means that the total grace period for - * a socket in this state will be - * +. 
- * - * Delaying Envoy's connection close and giving the peer the opportunity to initiate the close - * sequence mitigates a race condition that exists when downstream clients do not drain/process - * data in a connection's receive buffer after a remote close has been detected via a socket - * write(). This race leads to such clients failing to process the response code sent by Envoy, - * which could result in erroneous downstream processing. - * - * If the timeout triggers, Envoy will close the connection's socket. - * - * The default timeout is 1000 ms if this option is not specified. - * - * .. NOTE:: - * To be useful in avoiding the race condition described above, this timeout must be set - * to *at least* +<100ms to account for - * a reasonable "worst" case processing time for a full iteration of Envoy's event loop>. - * - * .. WARNING:: - * A value of 0 will completely disable delayed close processing. When disabled, the downstream - * connection's socket will be closed immediately after the write flush is completed or will - * never close if the write flush does not complete. - */ - 'delayed_close_timeout'?: (_google_protobuf_Duration); - /** - * The amount of time that Envoy will wait for the entire request to be received. - * The timer is activated when the request is initiated, and is disarmed when the last byte of the - * request is sent upstream (i.e. all decoding filters have processed the request), OR when the - * response is initiated. If not specified or set to 0, this timeout is disabled. - */ - 'request_timeout'?: (_google_protobuf_Duration); - /** - * The maximum request headers size for incoming connections. - * If unconfigured, the default max request headers allowed is 60 KiB. - * Requests that exceed this limit will receive a 431 response. - * The max configurable limit is 96 KiB, based on current implementation - * constraints. - */ - 'max_request_headers_kb'?: (_google_protobuf_UInt32Value); - /** - * Should paths be normalized according to RFC 3986 before any processing of - * requests by HTTP filters or routing? This affects the upstream *:path* header - * as well. For paths that fail this check, Envoy will respond with 400 to - * paths that are malformed. This defaults to false currently but will default - * true in the future. When not specified, this value may be overridden by the - * runtime variable - * :ref:`http_connection_manager.normalize_path`. - * See `Normalization and Comparison ` - * for details of normalization. - * Note that Envoy does not perform - * `case normalization ` - */ - 'normalize_path'?: (_google_protobuf_BoolValue); - /** - * A route table will be dynamically assigned to each request based on request attributes - * (e.g., the value of a header). The "routing scopes" (i.e., route tables) and "scope keys" are - * specified in this message. - */ - 'scoped_routes'?: (_envoy_config_filter_network_http_connection_manager_v2_ScopedRoutes); - /** - * Whether the connection manager will keep the :ref:`x-request-id - * ` header if passed for a request that is edge - * (Edge request is the request from external clients to front Envoy) and not reset it, which - * is the current Envoy behaviour. This defaults to false. - */ - 'preserve_external_request_id'?: (boolean); - /** - * Determines if adjacent slashes in the path are merged into one before any processing of - * requests by HTTP filters or routing. This affects the upstream *:path* header as well. 
Without - * setting this option, incoming requests with path `//dir///file` will not match against route - * with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of - * `HTTP spec ` and is provided for convenience. - */ - 'merge_slashes'?: (boolean); - /** - * Defines the action to be applied to the Server header on the response path. - * By default, Envoy will overwrite the header with the value specified in - * server_name. - */ - 'server_header_transformation'?: (_envoy_config_filter_network_http_connection_manager_v2_HttpConnectionManager_ServerHeaderTransformation | keyof typeof _envoy_config_filter_network_http_connection_manager_v2_HttpConnectionManager_ServerHeaderTransformation); - /** - * Additional settings for HTTP requests handled by the connection manager. These will be - * applicable to both HTTP1 and HTTP2 requests. - */ - 'common_http_protocol_options'?: (_envoy_api_v2_core_HttpProtocolOptions); - /** - * The configuration of the request ID extension. This includes operations such as - * generation, validation, and associated tracing operations. - * - * If not set, Envoy uses the default UUID-based behavior: - * - * 1. Request ID is propagated using *x-request-id* header. - * - * 2. Request ID is a universally unique identifier (UUID). - * - * 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID. - */ - 'request_id_extension'?: (_envoy_config_filter_network_http_connection_manager_v2_RequestIDExtension); - 'route_specifier'?: "rds"|"route_config"|"scoped_routes"; -} - -/** - * [#next-free-field: 37] - */ -export interface HttpConnectionManager__Output { - /** - * Supplies the type of codec that the connection manager should use. - */ - 'codec_type': (keyof typeof _envoy_config_filter_network_http_connection_manager_v2_HttpConnectionManager_CodecType); - /** - * The human readable prefix to use when emitting statistics for the - * connection manager. See the :ref:`statistics documentation ` for - * more information. - */ - 'stat_prefix': (string); - /** - * The connection manager’s route table will be dynamically loaded via the RDS API. - */ - 'rds'?: (_envoy_config_filter_network_http_connection_manager_v2_Rds__Output); - /** - * The route table for the connection manager is static and is specified in this property. - */ - 'route_config'?: (_envoy_api_v2_RouteConfiguration__Output); - /** - * A list of individual HTTP filters that make up the filter chain for - * requests made to the connection manager. :ref:`Order matters ` - * as the filters are processed sequentially as request events happen. - */ - 'http_filters': (_envoy_config_filter_network_http_connection_manager_v2_HttpFilter__Output)[]; - /** - * Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` - * and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See the linked - * documentation for more information. Defaults to false. - */ - 'add_user_agent'?: (_google_protobuf_BoolValue__Output); - /** - * Presence of the object defines whether the connection manager - * emits :ref:`tracing ` data to the :ref:`configured tracing provider - * `. - */ - 'tracing'?: (_envoy_config_filter_network_http_connection_manager_v2_HttpConnectionManager_Tracing__Output); - /** - * Additional HTTP/1 settings that are passed to the HTTP/1 codec. - */ - 'http_protocol_options'?: (_envoy_api_v2_core_Http1ProtocolOptions__Output); - /** - * Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. 
- */ - 'http2_protocol_options'?: (_envoy_api_v2_core_Http2ProtocolOptions__Output); - /** - * An optional override that the connection manager will write to the server - * header in responses. If not set, the default is *envoy*. - */ - 'server_name': (string); - /** - * The idle timeout for connections managed by the connection manager. The - * idle timeout is defined as the period in which there are no active - * requests. If not set, there is no idle timeout. When the idle timeout is - * reached the connection will be closed. If the connection is an HTTP/2 - * connection a drain sequence will occur prior to closing the connection. - * This field is deprecated. Use :ref:`idle_timeout - * ` - * instead. - */ - 'idle_timeout'?: (_google_protobuf_Duration__Output); - /** - * The time that Envoy will wait between sending an HTTP/2 “shutdown - * notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame. - * This is used so that Envoy provides a grace period for new streams that - * race with the final GOAWAY frame. During this grace period, Envoy will - * continue to accept new streams. After the grace period, a final GOAWAY - * frame is sent and Envoy will start refusing new streams. Draining occurs - * both when a connection hits the idle timeout or during general server - * draining. The default grace period is 5000 milliseconds (5 seconds) if this - * option is not specified. - */ - 'drain_timeout'?: (_google_protobuf_Duration__Output); - /** - * Configuration for :ref:`HTTP access logs ` - * emitted by the connection manager. - */ - 'access_log': (_envoy_config_filter_accesslog_v2_AccessLog__Output)[]; - /** - * If set to true, the connection manager will use the real remote address - * of the client connection when determining internal versus external origin and manipulating - * various headers. If set to false or absent, the connection manager will use the - * :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. See the documentation for - * :ref:`config_http_conn_man_headers_x-forwarded-for`, - * :ref:`config_http_conn_man_headers_x-envoy-internal`, and - * :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information. - */ - 'use_remote_address'?: (_google_protobuf_BoolValue__Output); - /** - * Whether the connection manager will generate the :ref:`x-request-id - * ` header if it does not exist. This defaults to - * true. Generating a random UUID4 is expensive so in high throughput scenarios where this feature - * is not desired it can be disabled. - */ - 'generate_request_id'?: (_google_protobuf_BoolValue__Output); - /** - * How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP - * header. - */ - 'forward_client_cert_details': (keyof typeof _envoy_config_filter_network_http_connection_manager_v2_HttpConnectionManager_ForwardClientCertDetails); - /** - * This field is valid only when :ref:`forward_client_cert_details - * ` - * is APPEND_FORWARD or SANITIZE_SET and the client connection is mTLS. It specifies the fields in - * the client certificate to be forwarded. Note that in the - * :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, *Hash* is always set, and - * *By* is always set when the client certificate presents the URI type Subject Alternative Name - * value. 
- */ - 'set_current_client_cert_details'?: (_envoy_config_filter_network_http_connection_manager_v2_HttpConnectionManager_SetCurrentClientCertDetails__Output); - /** - * If proxy_100_continue is true, Envoy will proxy incoming "Expect: - * 100-continue" headers upstream, and forward "100 Continue" responses - * downstream. If this is false or not set, Envoy will instead strip the - * "Expect: 100-continue" header, and send a "100 Continue" response itself. - */ - 'proxy_100_continue': (boolean); - /** - * The number of additional ingress proxy hops from the right side of the - * :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when - * determining the origin client's IP address. The default is zero if this option - * is not specified. See the documentation for - * :ref:`config_http_conn_man_headers_x-forwarded-for` for more information. - */ - 'xff_num_trusted_hops': (number); - /** - * If - * :ref:`use_remote_address - * ` - * is true and represent_ipv4_remote_address_as_ipv4_mapped_ipv6 is true and the remote address is - * an IPv4 address, the address will be mapped to IPv6 before it is appended to *x-forwarded-for*. - * This is useful for testing compatibility of upstream services that parse the header value. For - * example, 50.0.0.1 is represented as ::FFFF:50.0.0.1. See `IPv4-Mapped IPv6 Addresses - * `_ for details. This will also affect the - * :ref:`config_http_conn_man_headers_x-envoy-external-address` header. See - * :ref:`http_connection_manager.represent_ipv4_remote_address_as_ipv4_mapped_ipv6 - * ` for runtime - * control. - * [#not-implemented-hide:] - */ - 'represent_ipv4_remote_address_as_ipv4_mapped_ipv6': (boolean); - /** - * If set, Envoy will not append the remote address to the - * :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. This may be used in - * conjunction with HTTP filters that explicitly manipulate XFF after the HTTP connection manager - * has mutated the request headers. While :ref:`use_remote_address - * ` - * will also suppress XFF addition, it has consequences for logging and other - * Envoy uses of the remote address, so *skip_xff_append* should be used - * when only an elision of XFF addition is intended. - */ - 'skip_xff_append': (boolean); - /** - * Via header value to append to request and response headers. If this is - * empty, no via header will be appended. - */ - 'via': (string); - 'upgrade_configs': (_envoy_config_filter_network_http_connection_manager_v2_HttpConnectionManager_UpgradeConfig__Output)[]; - /** - * The stream idle timeout for connections managed by the connection manager. - * If not specified, this defaults to 5 minutes. The default value was selected - * so as not to interfere with any smaller configured timeouts that may have - * existed in configurations prior to the introduction of this feature, while - * introducing robustness to TCP connections that terminate without a FIN. - * - * This idle timeout applies to new streams and is overridable by the - * :ref:`route-level idle_timeout - * `. Even on a stream in - * which the override applies, prior to receipt of the initial request - * headers, the :ref:`stream_idle_timeout - * ` - * applies. Each time an encode/decode event for headers or data is processed - * for the stream, the timer will be reset. If the timeout fires, the stream - * is terminated with a 408 Request Timeout error code if no upstream response - * header has been received, otherwise a stream reset occurs. 
- * - * Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due - * to the granularity of events presented to the connection manager. For example, while receiving - * very large request headers, it may be the case that there is traffic regularly arriving on the - * wire while the connection manage is only able to observe the end-of-headers event, hence the - * stream may still idle timeout. - * - * A value of 0 will completely disable the connection manager stream idle - * timeout, although per-route idle timeout overrides will continue to apply. - */ - 'stream_idle_timeout'?: (_google_protobuf_Duration__Output); - /** - * Configures what network addresses are considered internal for stats and header sanitation - * purposes. If unspecified, only RFC1918 IP addresses will be considered internal. - * See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more - * information about internal/external addresses. - */ - 'internal_address_config'?: (_envoy_config_filter_network_http_connection_manager_v2_HttpConnectionManager_InternalAddressConfig__Output); - /** - * The delayed close timeout is for downstream connections managed by the HTTP connection manager. - * It is defined as a grace period after connection close processing has been locally initiated - * during which Envoy will wait for the peer to close (i.e., a TCP FIN/RST is received by Envoy - * from the downstream connection) prior to Envoy closing the socket associated with that - * connection. - * NOTE: This timeout is enforced even when the socket associated with the downstream connection - * is pending a flush of the write buffer. However, any progress made writing data to the socket - * will restart the timer associated with this timeout. This means that the total grace period for - * a socket in this state will be - * +. - * - * Delaying Envoy's connection close and giving the peer the opportunity to initiate the close - * sequence mitigates a race condition that exists when downstream clients do not drain/process - * data in a connection's receive buffer after a remote close has been detected via a socket - * write(). This race leads to such clients failing to process the response code sent by Envoy, - * which could result in erroneous downstream processing. - * - * If the timeout triggers, Envoy will close the connection's socket. - * - * The default timeout is 1000 ms if this option is not specified. - * - * .. NOTE:: - * To be useful in avoiding the race condition described above, this timeout must be set - * to *at least* +<100ms to account for - * a reasonable "worst" case processing time for a full iteration of Envoy's event loop>. - * - * .. WARNING:: - * A value of 0 will completely disable delayed close processing. When disabled, the downstream - * connection's socket will be closed immediately after the write flush is completed or will - * never close if the write flush does not complete. - */ - 'delayed_close_timeout'?: (_google_protobuf_Duration__Output); - /** - * The amount of time that Envoy will wait for the entire request to be received. - * The timer is activated when the request is initiated, and is disarmed when the last byte of the - * request is sent upstream (i.e. all decoding filters have processed the request), OR when the - * response is initiated. If not specified or set to 0, this timeout is disabled. - */ - 'request_timeout'?: (_google_protobuf_Duration__Output); - /** - * The maximum request headers size for incoming connections. 
- * If unconfigured, the default max request headers allowed is 60 KiB. - * Requests that exceed this limit will receive a 431 response. - * The max configurable limit is 96 KiB, based on current implementation - * constraints. - */ - 'max_request_headers_kb'?: (_google_protobuf_UInt32Value__Output); - /** - * Should paths be normalized according to RFC 3986 before any processing of - * requests by HTTP filters or routing? This affects the upstream *:path* header - * as well. For paths that fail this check, Envoy will respond with 400 to - * paths that are malformed. This defaults to false currently but will default - * true in the future. When not specified, this value may be overridden by the - * runtime variable - * :ref:`http_connection_manager.normalize_path`. - * See `Normalization and Comparison ` - * for details of normalization. - * Note that Envoy does not perform - * `case normalization ` - */ - 'normalize_path'?: (_google_protobuf_BoolValue__Output); - /** - * A route table will be dynamically assigned to each request based on request attributes - * (e.g., the value of a header). The "routing scopes" (i.e., route tables) and "scope keys" are - * specified in this message. - */ - 'scoped_routes'?: (_envoy_config_filter_network_http_connection_manager_v2_ScopedRoutes__Output); - /** - * Whether the connection manager will keep the :ref:`x-request-id - * ` header if passed for a request that is edge - * (Edge request is the request from external clients to front Envoy) and not reset it, which - * is the current Envoy behaviour. This defaults to false. - */ - 'preserve_external_request_id': (boolean); - /** - * Determines if adjacent slashes in the path are merged into one before any processing of - * requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without - * setting this option, incoming requests with path `//dir///file` will not match against route - * with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of - * `HTTP spec ` and is provided for convenience. - */ - 'merge_slashes': (boolean); - /** - * Defines the action to be applied to the Server header on the response path. - * By default, Envoy will overwrite the header with the value specified in - * server_name. - */ - 'server_header_transformation': (keyof typeof _envoy_config_filter_network_http_connection_manager_v2_HttpConnectionManager_ServerHeaderTransformation); - /** - * Additional settings for HTTP requests handled by the connection manager. These will be - * applicable to both HTTP1 and HTTP2 requests. - */ - 'common_http_protocol_options'?: (_envoy_api_v2_core_HttpProtocolOptions__Output); - /** - * The configuration of the request ID extension. This includes operations such as - * generation, validation, and associated tracing operations. - * - * If not set, Envoy uses the default UUID-based behavior: - * - * 1. Request ID is propagated using *x-request-id* header. - * - * 2. Request ID is a universally unique identifier (UUID). - * - * 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID. 
- */ - 'request_id_extension'?: (_envoy_config_filter_network_http_connection_manager_v2_RequestIDExtension__Output); - 'route_specifier': "rds"|"route_config"|"scoped_routes"; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/filter/network/http_connection_manager/v2/HttpFilter.ts b/packages/grpc-js-xds/src/generated/envoy/config/filter/network/http_connection_manager/v2/HttpFilter.ts deleted file mode 100644 index 84e4292fc..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/config/filter/network/http_connection_manager/v2/HttpFilter.ts +++ /dev/null @@ -1,34 +0,0 @@ -// Original file: deps/envoy-api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto - -import type { Struct as _google_protobuf_Struct, Struct__Output as _google_protobuf_Struct__Output } from '../../../../../../google/protobuf/Struct'; -import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../../../google/protobuf/Any'; - -export interface HttpFilter { - /** - * The name of the filter to instantiate. The name must match a - * :ref:`supported filter `. - */ - 'name'?: (string); - 'config'?: (_google_protobuf_Struct); - 'typed_config'?: (_google_protobuf_Any); - /** - * Filter specific configuration which depends on the filter being instantiated. See the supported - * filters for further documentation. - */ - 'config_type'?: "config"|"typed_config"; -} - -export interface HttpFilter__Output { - /** - * The name of the filter to instantiate. The name must match a - * :ref:`supported filter `. - */ - 'name': (string); - 'config'?: (_google_protobuf_Struct__Output); - 'typed_config'?: (_google_protobuf_Any__Output); - /** - * Filter specific configuration which depends on the filter being instantiated. See the supported - * filters for further documentation. - */ - 'config_type': "config"|"typed_config"; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/filter/network/http_connection_manager/v2/ScopedRds.ts b/packages/grpc-js-xds/src/generated/envoy/config/filter/network/http_connection_manager/v2/ScopedRds.ts deleted file mode 100644 index b3d89dffb..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/config/filter/network/http_connection_manager/v2/ScopedRds.ts +++ /dev/null @@ -1,17 +0,0 @@ -// Original file: deps/envoy-api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto - -import type { ConfigSource as _envoy_api_v2_core_ConfigSource, ConfigSource__Output as _envoy_api_v2_core_ConfigSource__Output } from '../../../../../../envoy/api/v2/core/ConfigSource'; - -export interface ScopedRds { - /** - * Configuration source specifier for scoped RDS. - */ - 'scoped_rds_config_source'?: (_envoy_api_v2_core_ConfigSource); -} - -export interface ScopedRds__Output { - /** - * Configuration source specifier for scoped RDS. 
- */ - 'scoped_rds_config_source'?: (_envoy_api_v2_core_ConfigSource__Output); -} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/filter/network/http_connection_manager/v2/ScopedRouteConfigurationsList.ts b/packages/grpc-js-xds/src/generated/envoy/config/filter/network/http_connection_manager/v2/ScopedRouteConfigurationsList.ts deleted file mode 100644 index a57f1725e..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/config/filter/network/http_connection_manager/v2/ScopedRouteConfigurationsList.ts +++ /dev/null @@ -1,17 +0,0 @@ -// Original file: deps/envoy-api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto - -import type { ScopedRouteConfiguration as _envoy_api_v2_ScopedRouteConfiguration, ScopedRouteConfiguration__Output as _envoy_api_v2_ScopedRouteConfiguration__Output } from '../../../../../../envoy/api/v2/ScopedRouteConfiguration'; - -/** - * This message is used to work around the limitations with 'oneof' and repeated fields. - */ -export interface ScopedRouteConfigurationsList { - 'scoped_route_configurations'?: (_envoy_api_v2_ScopedRouteConfiguration)[]; -} - -/** - * This message is used to work around the limitations with 'oneof' and repeated fields. - */ -export interface ScopedRouteConfigurationsList__Output { - 'scoped_route_configurations': (_envoy_api_v2_ScopedRouteConfiguration__Output)[]; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/listener/ActiveRawUdpListenerConfig.ts b/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/ActiveRawUdpListenerConfig.ts similarity index 56% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/listener/ActiveRawUdpListenerConfig.ts rename to packages/grpc-js-xds/src/generated/envoy/config/listener/v3/ActiveRawUdpListenerConfig.ts index 7bb47c26c..3cc895aa9 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/listener/ActiveRawUdpListenerConfig.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/ActiveRawUdpListenerConfig.ts @@ -1,4 +1,4 @@ -// Original file: deps/envoy-api/envoy/api/v2/listener/udp_listener_config.proto +// Original file: deps/envoy-api/envoy/config/listener/v3/udp_listener_config.proto export interface ActiveRawUdpListenerConfig { diff --git a/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/AdditionalAddress.ts b/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/AdditionalAddress.ts new file mode 100644 index 000000000..cc1f5e42f --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/AdditionalAddress.ts @@ -0,0 +1,38 @@ +// Original file: deps/envoy-api/envoy/config/listener/v3/listener.proto + +import type { Address as _envoy_config_core_v3_Address, Address__Output as _envoy_config_core_v3_Address__Output } from '../../../../envoy/config/core/v3/Address'; +import type { SocketOptionsOverride as _envoy_config_core_v3_SocketOptionsOverride, SocketOptionsOverride__Output as _envoy_config_core_v3_SocketOptionsOverride__Output } from '../../../../envoy/config/core/v3/SocketOptionsOverride'; + +/** + * The additional address the listener is listening on. + */ +export interface AdditionalAddress { + 'address'?: (_envoy_config_core_v3_Address | null); + /** + * Additional socket options that may not be present in Envoy source code or + * precompiled binaries. If specified, this will override the + * :ref:`socket_options ` + * in the listener. 
If specified with no + * :ref:`socket_options ` + * or an empty list of :ref:`socket_options `, + * it means no socket option will apply. + */ + 'socket_options'?: (_envoy_config_core_v3_SocketOptionsOverride | null); +} + +/** + * The additional address the listener is listening on. + */ +export interface AdditionalAddress__Output { + 'address': (_envoy_config_core_v3_Address__Output | null); + /** + * Additional socket options that may not be present in Envoy source code or + * precompiled binaries. If specified, this will override the + * :ref:`socket_options ` + * in the listener. If specified with no + * :ref:`socket_options ` + * or an empty list of :ref:`socket_options `, + * it means no socket option will apply. + */ + 'socket_options': (_envoy_config_core_v3_SocketOptionsOverride__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/listener/v2/ApiListener.ts b/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/ApiListener.ts similarity index 74% rename from packages/grpc-js-xds/src/generated/envoy/config/listener/v2/ApiListener.ts rename to packages/grpc-js-xds/src/generated/envoy/config/listener/v3/ApiListener.ts index f683d3e82..5a8e7f37f 100644 --- a/packages/grpc-js-xds/src/generated/envoy/config/listener/v2/ApiListener.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/ApiListener.ts @@ -1,4 +1,4 @@ -// Original file: deps/envoy-api/envoy/config/listener/v2/api_listener.proto +// Original file: deps/envoy-api/envoy/config/listener/v3/api_listener.proto import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; @@ -10,14 +10,15 @@ export interface ApiListener { /** * The type in this field determines the type of API listener. At present, the following * types are supported: - * envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager (HTTP) + * envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager (HTTP) + * envoy.extensions.filters.network.http_connection_manager.v3.EnvoyMobileHttpConnectionManager (HTTP) * [#next-major-version: In the v3 API, replace this Any field with a oneof containing the * specific config message for each type of API listener. We could not do this in v2 because * it would have caused circular dependencies for go protos: lds.proto depends on this file, * and http_connection_manager.proto depends on rds.proto, which is in the same directory as * lds.proto, so lds.proto cannot depend on this file.] */ - 'api_listener'?: (_google_protobuf_Any); + 'api_listener'?: (_google_protobuf_Any | null); } /** @@ -28,12 +29,13 @@ export interface ApiListener__Output { /** * The type in this field determines the type of API listener. At present, the following * types are supported: - * envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager (HTTP) + * envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager (HTTP) + * envoy.extensions.filters.network.http_connection_manager.v3.EnvoyMobileHttpConnectionManager (HTTP) * [#next-major-version: In the v3 API, replace this Any field with a oneof containing the * specific config message for each type of API listener. We could not do this in v2 because * it would have caused circular dependencies for go protos: lds.proto depends on this file, * and http_connection_manager.proto depends on rds.proto, which is in the same directory as * lds.proto, so lds.proto cannot depend on this file.] 
*/ - 'api_listener'?: (_google_protobuf_Any__Output); + 'api_listener': (_google_protobuf_Any__Output | null); } diff --git a/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/ApiListenerManager.ts b/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/ApiListenerManager.ts new file mode 100644 index 000000000..125875161 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/ApiListenerManager.ts @@ -0,0 +1,18 @@ +// Original file: deps/envoy-api/envoy/config/listener/v3/listener.proto + + +/** + * A placeholder proto so that users can explicitly configure the API + * Listener Manager via the bootstrap's :ref:`listener_manager `. + * [#not-implemented-hide:] + */ +export interface ApiListenerManager { +} + +/** + * A placeholder proto so that users can explicitly configure the API + * Listener Manager via the bootstrap's :ref:`listener_manager `. + * [#not-implemented-hide:] + */ +export interface ApiListenerManager__Output { +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/Filter.ts b/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/Filter.ts new file mode 100644 index 000000000..b95b36418 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/Filter.ts @@ -0,0 +1,52 @@ +// Original file: deps/envoy-api/envoy/config/listener/v3/listener_components.proto + +import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; +import type { ExtensionConfigSource as _envoy_config_core_v3_ExtensionConfigSource, ExtensionConfigSource__Output as _envoy_config_core_v3_ExtensionConfigSource__Output } from '../../../../envoy/config/core/v3/ExtensionConfigSource'; + +/** + * [#next-free-field: 6] + */ +export interface Filter { + /** + * The name of the filter configuration. + */ + 'name'?: (string); + /** + * Filter specific configuration which depends on the filter being + * instantiated. See the supported filters for further documentation. + * [#extension-category: envoy.filters.network] + */ + 'typed_config'?: (_google_protobuf_Any | null); + /** + * Configuration source specifier for an extension configuration discovery + * service. In case of a failure and without the default configuration, the + * listener closes the connections. + * [#not-implemented-hide:] + */ + 'config_discovery'?: (_envoy_config_core_v3_ExtensionConfigSource | null); + 'config_type'?: "typed_config"|"config_discovery"; +} + +/** + * [#next-free-field: 6] + */ +export interface Filter__Output { + /** + * The name of the filter configuration. + */ + 'name': (string); + /** + * Filter specific configuration which depends on the filter being + * instantiated. See the supported filters for further documentation. + * [#extension-category: envoy.filters.network] + */ + 'typed_config'?: (_google_protobuf_Any__Output | null); + /** + * Configuration source specifier for an extension configuration discovery + * service. In case of a failure and without the default configuration, the + * listener closes the connections. 
+ * [#not-implemented-hide:] + */ + 'config_discovery'?: (_envoy_config_core_v3_ExtensionConfigSource__Output | null); + 'config_type': "typed_config"|"config_discovery"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/FilterChain.ts b/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/FilterChain.ts new file mode 100644 index 000000000..77b08f48e --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/FilterChain.ts @@ -0,0 +1,188 @@ +// Original file: deps/envoy-api/envoy/config/listener/v3/listener_components.proto + +import type { FilterChainMatch as _envoy_config_listener_v3_FilterChainMatch, FilterChainMatch__Output as _envoy_config_listener_v3_FilterChainMatch__Output } from '../../../../envoy/config/listener/v3/FilterChainMatch'; +import type { Filter as _envoy_config_listener_v3_Filter, Filter__Output as _envoy_config_listener_v3_Filter__Output } from '../../../../envoy/config/listener/v3/Filter'; +import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../../google/protobuf/BoolValue'; +import type { Metadata as _envoy_config_core_v3_Metadata, Metadata__Output as _envoy_config_core_v3_Metadata__Output } from '../../../../envoy/config/core/v3/Metadata'; +import type { TransportSocket as _envoy_config_core_v3_TransportSocket, TransportSocket__Output as _envoy_config_core_v3_TransportSocket__Output } from '../../../../envoy/config/core/v3/TransportSocket'; +import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../../google/protobuf/Duration'; + +/** + * The configuration for on-demand filter chain. If this field is not empty in FilterChain message, + * a filter chain will be built on-demand. + * On-demand filter chains help speedup the warming up of listeners since the building and initialization of + * an on-demand filter chain will be postponed to the arrival of new connection requests that require this filter chain. + * Filter chains that are not often used can be set as on-demand. + */ +export interface _envoy_config_listener_v3_FilterChain_OnDemandConfiguration { + /** + * The timeout to wait for filter chain placeholders to complete rebuilding. + * 1. If this field is set to 0, timeout is disabled. + * 2. If not specified, a default timeout of 15s is used. + * Rebuilding will wait until dependencies are ready, have failed, or this timeout is reached. + * Upon failure or timeout, all connections related to this filter chain will be closed. + * Rebuilding will start again on the next new connection. + */ + 'rebuild_timeout'?: (_google_protobuf_Duration | null); +} + +/** + * The configuration for on-demand filter chain. If this field is not empty in FilterChain message, + * a filter chain will be built on-demand. + * On-demand filter chains help speedup the warming up of listeners since the building and initialization of + * an on-demand filter chain will be postponed to the arrival of new connection requests that require this filter chain. + * Filter chains that are not often used can be set as on-demand. + */ +export interface _envoy_config_listener_v3_FilterChain_OnDemandConfiguration__Output { + /** + * The timeout to wait for filter chain placeholders to complete rebuilding. + * 1. If this field is set to 0, timeout is disabled. + * 2. If not specified, a default timeout of 15s is used. + * Rebuilding will wait until dependencies are ready, have failed, or this timeout is reached. 
+ * Upon failure or timeout, all connections related to this filter chain will be closed. + * Rebuilding will start again on the next new connection. + */ + 'rebuild_timeout': (_google_protobuf_Duration__Output | null); +} + +/** + * A filter chain wraps a set of match criteria, an option TLS context, a set of filters, and + * various other parameters. + * [#next-free-field: 10] + */ +export interface FilterChain { + /** + * The criteria to use when matching a connection to this filter chain. + */ + 'filter_chain_match'?: (_envoy_config_listener_v3_FilterChainMatch | null); + /** + * A list of individual network filters that make up the filter chain for + * connections established with the listener. Order matters as the filters are + * processed sequentially as connection events happen. Note: If the filter + * list is empty, the connection will close by default. + * + * For QUIC listeners, network filters other than HTTP Connection Manager (HCM) + * can be created, but due to differences in the connection implementation compared + * to TCP, the onData() method will never be called. Therefore, network filters + * for QUIC listeners should only expect to do work at the start of a new connection + * (i.e. in onNewConnection()). HCM must be the last (or only) filter in the chain. + */ + 'filters'?: (_envoy_config_listener_v3_Filter)[]; + /** + * Whether the listener should expect a PROXY protocol V1 header on new + * connections. If this option is enabled, the listener will assume that that + * remote address of the connection is the one specified in the header. Some + * load balancers including the AWS ELB support this option. If the option is + * absent or set to false, Envoy will use the physical peer address of the + * connection as the remote address. + * + * This field is deprecated. Add a + * :ref:`PROXY protocol listener filter ` + * explicitly instead. + * @deprecated + */ + 'use_proxy_proto'?: (_google_protobuf_BoolValue | null); + /** + * [#not-implemented-hide:] filter chain metadata. + */ + 'metadata'?: (_envoy_config_core_v3_Metadata | null); + /** + * Optional custom transport socket implementation to use for downstream connections. + * To setup TLS, set a transport socket with name ``envoy.transport_sockets.tls`` and + * :ref:`DownstreamTlsContext ` in the ``typed_config``. + * If no transport socket configuration is specified, new connections + * will be set up with plaintext. + * [#extension-category: envoy.transport_sockets.downstream] + */ + 'transport_socket'?: (_envoy_config_core_v3_TransportSocket | null); + /** + * The unique name (or empty) by which this filter chain is known. + * Note: :ref:`filter_chain_matcher + * ` + * requires that filter chains are uniquely named within a listener. + */ + 'name'?: (string); + /** + * [#not-implemented-hide:] The configuration to specify whether the filter chain will be built on-demand. + * If this field is not empty, the filter chain will be built on-demand. + * Otherwise, the filter chain will be built normally and block listener warming. + */ + 'on_demand_configuration'?: (_envoy_config_listener_v3_FilterChain_OnDemandConfiguration | null); + /** + * If present and nonzero, the amount of time to allow incoming connections to complete any + * transport socket negotiations. If this expires before the transport reports connection + * establishment, the connection is summarily closed. 
+ */ + 'transport_socket_connect_timeout'?: (_google_protobuf_Duration | null); +} + +/** + * A filter chain wraps a set of match criteria, an option TLS context, a set of filters, and + * various other parameters. + * [#next-free-field: 10] + */ +export interface FilterChain__Output { + /** + * The criteria to use when matching a connection to this filter chain. + */ + 'filter_chain_match': (_envoy_config_listener_v3_FilterChainMatch__Output | null); + /** + * A list of individual network filters that make up the filter chain for + * connections established with the listener. Order matters as the filters are + * processed sequentially as connection events happen. Note: If the filter + * list is empty, the connection will close by default. + * + * For QUIC listeners, network filters other than HTTP Connection Manager (HCM) + * can be created, but due to differences in the connection implementation compared + * to TCP, the onData() method will never be called. Therefore, network filters + * for QUIC listeners should only expect to do work at the start of a new connection + * (i.e. in onNewConnection()). HCM must be the last (or only) filter in the chain. + */ + 'filters': (_envoy_config_listener_v3_Filter__Output)[]; + /** + * Whether the listener should expect a PROXY protocol V1 header on new + * connections. If this option is enabled, the listener will assume that that + * remote address of the connection is the one specified in the header. Some + * load balancers including the AWS ELB support this option. If the option is + * absent or set to false, Envoy will use the physical peer address of the + * connection as the remote address. + * + * This field is deprecated. Add a + * :ref:`PROXY protocol listener filter ` + * explicitly instead. + * @deprecated + */ + 'use_proxy_proto': (_google_protobuf_BoolValue__Output | null); + /** + * [#not-implemented-hide:] filter chain metadata. + */ + 'metadata': (_envoy_config_core_v3_Metadata__Output | null); + /** + * Optional custom transport socket implementation to use for downstream connections. + * To setup TLS, set a transport socket with name ``envoy.transport_sockets.tls`` and + * :ref:`DownstreamTlsContext ` in the ``typed_config``. + * If no transport socket configuration is specified, new connections + * will be set up with plaintext. + * [#extension-category: envoy.transport_sockets.downstream] + */ + 'transport_socket': (_envoy_config_core_v3_TransportSocket__Output | null); + /** + * The unique name (or empty) by which this filter chain is known. + * Note: :ref:`filter_chain_matcher + * ` + * requires that filter chains are uniquely named within a listener. + */ + 'name': (string); + /** + * [#not-implemented-hide:] The configuration to specify whether the filter chain will be built on-demand. + * If this field is not empty, the filter chain will be built on-demand. + * Otherwise, the filter chain will be built normally and block listener warming. + */ + 'on_demand_configuration': (_envoy_config_listener_v3_FilterChain_OnDemandConfiguration__Output | null); + /** + * If present and nonzero, the amount of time to allow incoming connections to complete any + * transport socket negotiations. If this expires before the transport reports connection + * establishment, the connection is summarily closed. 
+ */ + 'transport_socket_connect_timeout': (_google_protobuf_Duration__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/listener/FilterChainMatch.ts b/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/FilterChainMatch.ts similarity index 66% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/listener/FilterChainMatch.ts rename to packages/grpc-js-xds/src/generated/envoy/config/listener/v3/FilterChainMatch.ts index 881a5a961..fcbfc1b3e 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/listener/FilterChainMatch.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/FilterChainMatch.ts @@ -1,24 +1,43 @@ -// Original file: deps/envoy-api/envoy/api/v2/listener/listener_components.proto +// Original file: deps/envoy-api/envoy/config/listener/v3/listener_components.proto -import type { CidrRange as _envoy_api_v2_core_CidrRange, CidrRange__Output as _envoy_api_v2_core_CidrRange__Output } from '../../../../envoy/api/v2/core/CidrRange'; +import type { CidrRange as _envoy_config_core_v3_CidrRange, CidrRange__Output as _envoy_config_core_v3_CidrRange__Output } from '../../../../envoy/config/core/v3/CidrRange'; import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; -// Original file: deps/envoy-api/envoy/api/v2/listener/listener_components.proto +// Original file: deps/envoy-api/envoy/config/listener/v3/listener_components.proto -export enum _envoy_api_v2_listener_FilterChainMatch_ConnectionSourceType { +export const _envoy_config_listener_v3_FilterChainMatch_ConnectionSourceType = { /** * Any connection source matches. */ - ANY = 0, + ANY: 'ANY', /** * Match a connection originating from the same host. */ - LOCAL = 1, + SAME_IP_OR_LOOPBACK: 'SAME_IP_OR_LOOPBACK', /** * Match a connection originating from a different host. */ - EXTERNAL = 2, -} + EXTERNAL: 'EXTERNAL', +} as const; + +export type _envoy_config_listener_v3_FilterChainMatch_ConnectionSourceType = + /** + * Any connection source matches. + */ + | 'ANY' + | 0 + /** + * Match a connection originating from the same host. + */ + | 'SAME_IP_OR_LOOPBACK' + | 1 + /** + * Match a connection originating from a different host. + */ + | 'EXTERNAL' + | 2 + +export type _envoy_config_listener_v3_FilterChainMatch_ConnectionSourceType__Output = typeof _envoy_config_listener_v3_FilterChainMatch_ConnectionSourceType[keyof typeof _envoy_config_listener_v3_FilterChainMatch_ConnectionSourceType] /** * Specifies the match criteria for selecting a specific filter chain for a @@ -35,9 +54,12 @@ export enum _envoy_api_v2_listener_FilterChainMatch_ConnectionSourceType { * 3. Server name (e.g. SNI for TLS protocol), * 4. Transport protocol. * 5. Application protocols (e.g. ALPN for TLS protocol). - * 6. Source type (e.g. any, local or external network). - * 7. Source IP address. - * 8. Source port. + * 6. Directly connected source IP address (this will only be different from the source IP address + * when using a listener filter that overrides the source address, such as the :ref:`Proxy Protocol + * listener filter `). + * 7. Source type (e.g. any, local or external network). + * 8. Source IP address. + * 9. Source port. 
* * For criteria that allow ranges or wildcards, the most specific value in any * of the configured filter chains that matches the incoming connection is going @@ -45,18 +67,30 @@ export enum _envoy_api_v2_listener_FilterChainMatch_ConnectionSourceType { * ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter * chain without ``server_names`` requirements). * + * A different way to reason about the filter chain matches: + * Suppose there exists N filter chains. Prune the filter chain set using the above 8 steps. + * In each step, filter chains which most specifically matches the attributes continue to the next step. + * The listener guarantees at most 1 filter chain is left after all of the steps. + * + * Example: + * + * For destination port, filter chains specifying the destination port of incoming traffic are the + * most specific match. If none of the filter chains specifies the exact destination port, the filter + * chains which do not specify ports are the most specific match. Filter chains specifying the + * wrong port can never be the most specific match. + * * [#comment: Implemented rules are kept in the preference order, with deprecated fields * listed at the end, because that's how we want to list them in the docs. * * [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules] - * [#next-free-field: 13] + * [#next-free-field: 14] */ export interface FilterChainMatch { /** * If non-empty, an IP address and prefix length to match addresses when the * listener is bound to 0.0.0.0/:: or when use_original_dst is specified. */ - 'prefix_ranges'?: (_envoy_api_v2_core_CidrRange)[]; + 'prefix_ranges'?: (_envoy_config_core_v3_CidrRange)[]; /** * If non-empty, an IP address and suffix length to match addresses when the * listener is bound to 0.0.0.0/:: or when use_original_dst is specified. @@ -66,14 +100,14 @@ export interface FilterChainMatch { /** * [#not-implemented-hide:] */ - 'suffix_len'?: (_google_protobuf_UInt32Value); + 'suffix_len'?: (_google_protobuf_UInt32Value | null); /** * The criteria is satisfied if the source IP address of the downstream * connection is contained in at least one of the specified subnets. If the * parameter is not specified or the list is empty, the source IP address is * ignored. */ - 'source_prefix_ranges'?: (_envoy_api_v2_core_CidrRange)[]; + 'source_prefix_ranges'?: (_envoy_config_core_v3_CidrRange)[]; /** * The criteria is satisfied if the source port of the downstream connection * is contained in at least one of the specified ports. If the parameter is @@ -84,7 +118,7 @@ export interface FilterChainMatch { * Optional destination port to consider when use_original_dst is set on the * listener in determining a filter chain match. */ - 'destination_port'?: (_google_protobuf_UInt32Value); + 'destination_port'?: (_google_protobuf_UInt32Value | null); /** * If non-empty, a transport protocol to consider when determining a filter chain match. * This value will be compared against the transport protocol of a new connection, when @@ -128,6 +162,7 @@ export interface FilterChainMatch { * will be first matched against ``www.example.com``, then ``*.example.com``, then ``*.com``. * * Note that partial wildcards are not supported, and values like ``*w.example.com`` are invalid. + * The value ``*`` is also not supported, and ``server_names`` should be omitted instead. * * .. attention:: * @@ -138,7 +173,13 @@ export interface FilterChainMatch { /** * Specifies the connection source IP match type. 
Can be any, local or external network. */ - 'source_type'?: (_envoy_api_v2_listener_FilterChainMatch_ConnectionSourceType | keyof typeof _envoy_api_v2_listener_FilterChainMatch_ConnectionSourceType); + 'source_type'?: (_envoy_config_listener_v3_FilterChainMatch_ConnectionSourceType); + /** + * The criteria is satisfied if the directly connected source IP address of the downstream + * connection is contained in at least one of the specified subnets. If the parameter is not + * specified or the list is empty, the directly connected source IP address is ignored. + */ + 'direct_source_prefix_ranges'?: (_envoy_config_core_v3_CidrRange)[]; } /** @@ -156,9 +197,12 @@ export interface FilterChainMatch { * 3. Server name (e.g. SNI for TLS protocol), * 4. Transport protocol. * 5. Application protocols (e.g. ALPN for TLS protocol). - * 6. Source type (e.g. any, local or external network). - * 7. Source IP address. - * 8. Source port. + * 6. Directly connected source IP address (this will only be different from the source IP address + * when using a listener filter that overrides the source address, such as the :ref:`Proxy Protocol + * listener filter `). + * 7. Source type (e.g. any, local or external network). + * 8. Source IP address. + * 9. Source port. * * For criteria that allow ranges or wildcards, the most specific value in any * of the configured filter chains that matches the incoming connection is going @@ -166,18 +210,30 @@ export interface FilterChainMatch { * ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter * chain without ``server_names`` requirements). * + * A different way to reason about the filter chain matches: + * Suppose there exists N filter chains. Prune the filter chain set using the above 8 steps. + * In each step, filter chains which most specifically matches the attributes continue to the next step. + * The listener guarantees at most 1 filter chain is left after all of the steps. + * + * Example: + * + * For destination port, filter chains specifying the destination port of incoming traffic are the + * most specific match. If none of the filter chains specifies the exact destination port, the filter + * chains which do not specify ports are the most specific match. Filter chains specifying the + * wrong port can never be the most specific match. + * * [#comment: Implemented rules are kept in the preference order, with deprecated fields * listed at the end, because that's how we want to list them in the docs. * * [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules] - * [#next-free-field: 13] + * [#next-free-field: 14] */ export interface FilterChainMatch__Output { /** * If non-empty, an IP address and prefix length to match addresses when the * listener is bound to 0.0.0.0/:: or when use_original_dst is specified. */ - 'prefix_ranges': (_envoy_api_v2_core_CidrRange__Output)[]; + 'prefix_ranges': (_envoy_config_core_v3_CidrRange__Output)[]; /** * If non-empty, an IP address and suffix length to match addresses when the * listener is bound to 0.0.0.0/:: or when use_original_dst is specified. @@ -187,14 +243,14 @@ export interface FilterChainMatch__Output { /** * [#not-implemented-hide:] */ - 'suffix_len'?: (_google_protobuf_UInt32Value__Output); + 'suffix_len': (_google_protobuf_UInt32Value__Output | null); /** * The criteria is satisfied if the source IP address of the downstream * connection is contained in at least one of the specified subnets. 
If the * parameter is not specified or the list is empty, the source IP address is * ignored. */ - 'source_prefix_ranges': (_envoy_api_v2_core_CidrRange__Output)[]; + 'source_prefix_ranges': (_envoy_config_core_v3_CidrRange__Output)[]; /** * The criteria is satisfied if the source port of the downstream connection * is contained in at least one of the specified ports. If the parameter is @@ -205,7 +261,7 @@ export interface FilterChainMatch__Output { * Optional destination port to consider when use_original_dst is set on the * listener in determining a filter chain match. */ - 'destination_port'?: (_google_protobuf_UInt32Value__Output); + 'destination_port': (_google_protobuf_UInt32Value__Output | null); /** * If non-empty, a transport protocol to consider when determining a filter chain match. * This value will be compared against the transport protocol of a new connection, when @@ -249,6 +305,7 @@ export interface FilterChainMatch__Output { * will be first matched against ``www.example.com``, then ``*.example.com``, then ``*.com``. * * Note that partial wildcards are not supported, and values like ``*w.example.com`` are invalid. + * The value ``*`` is also not supported, and ``server_names`` should be omitted instead. * * .. attention:: * @@ -259,5 +316,11 @@ export interface FilterChainMatch__Output { /** * Specifies the connection source IP match type. Can be any, local or external network. */ - 'source_type': (keyof typeof _envoy_api_v2_listener_FilterChainMatch_ConnectionSourceType); + 'source_type': (_envoy_config_listener_v3_FilterChainMatch_ConnectionSourceType__Output); + /** + * The criteria is satisfied if the directly connected source IP address of the downstream + * connection is contained in at least one of the specified subnets. If the parameter is not + * specified or the list is empty, the directly connected source IP address is ignored. 
+ */ + 'direct_source_prefix_ranges': (_envoy_config_core_v3_CidrRange__Output)[]; } diff --git a/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/Listener.ts b/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/Listener.ts new file mode 100644 index 000000000..8897eacf5 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/Listener.ts @@ -0,0 +1,732 @@ +// Original file: deps/envoy-api/envoy/config/listener/v3/listener.proto + +import type { Address as _envoy_config_core_v3_Address, Address__Output as _envoy_config_core_v3_Address__Output } from '../../../../envoy/config/core/v3/Address'; +import type { FilterChain as _envoy_config_listener_v3_FilterChain, FilterChain__Output as _envoy_config_listener_v3_FilterChain__Output } from '../../../../envoy/config/listener/v3/FilterChain'; +import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../../google/protobuf/BoolValue'; +import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; +import type { Metadata as _envoy_config_core_v3_Metadata, Metadata__Output as _envoy_config_core_v3_Metadata__Output } from '../../../../envoy/config/core/v3/Metadata'; +import type { ListenerFilter as _envoy_config_listener_v3_ListenerFilter, ListenerFilter__Output as _envoy_config_listener_v3_ListenerFilter__Output } from '../../../../envoy/config/listener/v3/ListenerFilter'; +import type { SocketOption as _envoy_config_core_v3_SocketOption, SocketOption__Output as _envoy_config_core_v3_SocketOption__Output } from '../../../../envoy/config/core/v3/SocketOption'; +import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../../google/protobuf/Duration'; +import type { TrafficDirection as _envoy_config_core_v3_TrafficDirection, TrafficDirection__Output as _envoy_config_core_v3_TrafficDirection__Output } from '../../../../envoy/config/core/v3/TrafficDirection'; +import type { UdpListenerConfig as _envoy_config_listener_v3_UdpListenerConfig, UdpListenerConfig__Output as _envoy_config_listener_v3_UdpListenerConfig__Output } from '../../../../envoy/config/listener/v3/UdpListenerConfig'; +import type { ApiListener as _envoy_config_listener_v3_ApiListener, ApiListener__Output as _envoy_config_listener_v3_ApiListener__Output } from '../../../../envoy/config/listener/v3/ApiListener'; +import type { AccessLog as _envoy_config_accesslog_v3_AccessLog, AccessLog__Output as _envoy_config_accesslog_v3_AccessLog__Output } from '../../../../envoy/config/accesslog/v3/AccessLog'; +import type { Matcher as _xds_type_matcher_v3_Matcher, Matcher__Output as _xds_type_matcher_v3_Matcher__Output } from '../../../../xds/type/matcher/v3/Matcher'; +import type { AdditionalAddress as _envoy_config_listener_v3_AdditionalAddress, AdditionalAddress__Output as _envoy_config_listener_v3_AdditionalAddress__Output } from '../../../../envoy/config/listener/v3/AdditionalAddress'; +import type { TypedExtensionConfig as _envoy_config_core_v3_TypedExtensionConfig, TypedExtensionConfig__Output as _envoy_config_core_v3_TypedExtensionConfig__Output } from '../../../../envoy/config/core/v3/TypedExtensionConfig'; + +/** + * Configuration for listener connection balancing. 
+ */ +export interface _envoy_config_listener_v3_Listener_ConnectionBalanceConfig { + /** + * If specified, the listener will use the exact connection balancer. + */ + 'exact_balance'?: (_envoy_config_listener_v3_Listener_ConnectionBalanceConfig_ExactBalance | null); + /** + * The listener will use the connection balancer according to ``type_url``. If ``type_url`` is invalid, + * Envoy will not attempt to balance active connections between worker threads. + * [#extension-category: envoy.network.connection_balance] + */ + 'extend_balance'?: (_envoy_config_core_v3_TypedExtensionConfig | null); + 'balance_type'?: "exact_balance"|"extend_balance"; +} + +/** + * Configuration for listener connection balancing. + */ +export interface _envoy_config_listener_v3_Listener_ConnectionBalanceConfig__Output { + /** + * If specified, the listener will use the exact connection balancer. + */ + 'exact_balance'?: (_envoy_config_listener_v3_Listener_ConnectionBalanceConfig_ExactBalance__Output | null); + /** + * The listener will use the connection balancer according to ``type_url``. If ``type_url`` is invalid, + * Envoy will not attempt to balance active connections between worker threads. + * [#extension-category: envoy.network.connection_balance] + */ + 'extend_balance'?: (_envoy_config_core_v3_TypedExtensionConfig__Output | null); + 'balance_type': "exact_balance"|"extend_balance"; +} + +/** + * [#not-implemented-hide:] + */ +export interface _envoy_config_listener_v3_Listener_DeprecatedV1 { + /** + * Whether the listener should bind to the port. A listener that doesn't + * bind can only receive connections redirected from other listeners that + * set use_original_dst parameter to true. Default is true. + * + * This is deprecated. Use :ref:`Listener.bind_to_port + * ` + */ + 'bind_to_port'?: (_google_protobuf_BoolValue | null); +} + +/** + * [#not-implemented-hide:] + */ +export interface _envoy_config_listener_v3_Listener_DeprecatedV1__Output { + /** + * Whether the listener should bind to the port. A listener that doesn't + * bind can only receive connections redirected from other listeners that + * set use_original_dst parameter to true. Default is true. + * + * This is deprecated. Use :ref:`Listener.bind_to_port + * ` + */ + 'bind_to_port': (_google_protobuf_BoolValue__Output | null); +} + +// Original file: deps/envoy-api/envoy/config/listener/v3/listener.proto + +export const _envoy_config_listener_v3_Listener_DrainType = { + /** + * Drain in response to calling /healthcheck/fail admin endpoint (along with the health check + * filter), listener removal/modification, and hot restart. + */ + DEFAULT: 'DEFAULT', + /** + * Drain in response to listener removal/modification and hot restart. This setting does not + * include /healthcheck/fail. This setting may be desirable if Envoy is hosting both ingress + * and egress listeners. + */ + MODIFY_ONLY: 'MODIFY_ONLY', +} as const; + +export type _envoy_config_listener_v3_Listener_DrainType = + /** + * Drain in response to calling /healthcheck/fail admin endpoint (along with the health check + * filter), listener removal/modification, and hot restart. + */ + | 'DEFAULT' + | 0 + /** + * Drain in response to listener removal/modification and hot restart. This setting does not + * include /healthcheck/fail. This setting may be desirable if Envoy is hosting both ingress + * and egress listeners. 
+ */ + | 'MODIFY_ONLY' + | 1 + +export type _envoy_config_listener_v3_Listener_DrainType__Output = typeof _envoy_config_listener_v3_Listener_DrainType[keyof typeof _envoy_config_listener_v3_Listener_DrainType] + +/** + * A connection balancer implementation that does exact balancing. This means that a lock is + * held during balancing so that connection counts are nearly exactly balanced between worker + * threads. This is "nearly" exact in the sense that a connection might close in parallel thus + * making the counts incorrect, but this should be rectified on the next accept. This balancer + * sacrifices accept throughput for accuracy and should be used when there are a small number of + * connections that rarely cycle (e.g., service mesh gRPC egress). + */ +export interface _envoy_config_listener_v3_Listener_ConnectionBalanceConfig_ExactBalance { +} + +/** + * A connection balancer implementation that does exact balancing. This means that a lock is + * held during balancing so that connection counts are nearly exactly balanced between worker + * threads. This is "nearly" exact in the sense that a connection might close in parallel thus + * making the counts incorrect, but this should be rectified on the next accept. This balancer + * sacrifices accept throughput for accuracy and should be used when there are a small number of + * connections that rarely cycle (e.g., service mesh gRPC egress). + */ +export interface _envoy_config_listener_v3_Listener_ConnectionBalanceConfig_ExactBalance__Output { +} + +/** + * Configuration for envoy internal listener. All the future internal listener features should be added here. + */ +export interface _envoy_config_listener_v3_Listener_InternalListenerConfig { +} + +/** + * Configuration for envoy internal listener. All the future internal listener features should be added here. + */ +export interface _envoy_config_listener_v3_Listener_InternalListenerConfig__Output { +} + +/** + * [#next-free-field: 34] + */ +export interface Listener { + /** + * The unique name by which this listener is known. If no name is provided, + * Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically + * updated or removed via :ref:`LDS ` a unique name must be provided. + */ + 'name'?: (string); + /** + * The address that the listener should listen on. In general, the address must be unique, though + * that is governed by the bind rules of the OS. E.g., multiple listeners can listen on port 0 on + * Linux as the actual port will be allocated by the OS. + * Required unless ``api_listener`` or ``listener_specifier`` is populated. + */ + 'address'?: (_envoy_config_core_v3_Address | null); + /** + * A list of filter chains to consider for this listener. The + * :ref:`FilterChain ` with the most specific + * :ref:`FilterChainMatch ` criteria is used on a + * connection. + * + * Example using SNI for filter chain selection can be found in the + * :ref:`FAQ entry `. + */ + 'filter_chains'?: (_envoy_config_listener_v3_FilterChain)[]; + /** + * If a connection is redirected using ``iptables``, the port on which the proxy + * receives it might be different from the original destination address. When this flag is set to + * true, the listener hands off redirected connections to the listener associated with the + * original destination address. If there is no listener associated with the original destination + * address, the connection is handled by the listener that receives it. Defaults to false. 
+ */ + 'use_original_dst'?: (_google_protobuf_BoolValue | null); + /** + * Soft limit on size of the listener’s new connection read and write buffers. + * If unspecified, an implementation defined default is applied (1MiB). + */ + 'per_connection_buffer_limit_bytes'?: (_google_protobuf_UInt32Value | null); + /** + * Listener metadata. + */ + 'metadata'?: (_envoy_config_core_v3_Metadata | null); + /** + * [#not-implemented-hide:] + * @deprecated + */ + 'deprecated_v1'?: (_envoy_config_listener_v3_Listener_DeprecatedV1 | null); + /** + * The type of draining to perform at a listener-wide level. + */ + 'drain_type'?: (_envoy_config_listener_v3_Listener_DrainType); + /** + * Listener filters have the opportunity to manipulate and augment the connection metadata that + * is used in connection filter chain matching, for example. These filters are run before any in + * :ref:`filter_chains `. Order matters as the + * filters are processed sequentially right after a socket has been accepted by the listener, and + * before a connection is created. + * UDP Listener filters can be specified when the protocol in the listener socket address in + * :ref:`protocol ` is :ref:`UDP + * `. + */ + 'listener_filters'?: (_envoy_config_listener_v3_ListenerFilter)[]; + /** + * Whether the listener should be set as a transparent socket. + * When this flag is set to true, connections can be redirected to the listener using an + * ``iptables`` ``TPROXY`` target, in which case the original source and destination addresses and + * ports are preserved on accepted connections. This flag should be used in combination with + * :ref:`an original_dst ` :ref:`listener filter + * ` to mark the connections' local addresses as + * "restored." This can be used to hand off each redirected connection to another listener + * associated with the connection's destination address. Direct connections to the socket without + * using ``TPROXY`` cannot be distinguished from connections redirected using ``TPROXY`` and are + * therefore treated as if they were redirected. + * When this flag is set to false, the listener's socket is explicitly reset as non-transparent. + * Setting this flag requires Envoy to run with the ``CAP_NET_ADMIN`` capability. + * When this flag is not set (default), the socket is not modified, i.e. the transparent option + * is neither set nor reset. + */ + 'transparent'?: (_google_protobuf_BoolValue | null); + /** + * Whether the listener should set the ``IP_FREEBIND`` socket option. When this + * flag is set to true, listeners can be bound to an IP address that is not + * configured on the system running Envoy. When this flag is set to false, the + * option ``IP_FREEBIND`` is disabled on the socket. When this flag is not set + * (default), the socket is not modified, i.e. the option is neither enabled + * nor disabled. + */ + 'freebind'?: (_google_protobuf_BoolValue | null); + /** + * Whether the listener should accept TCP Fast Open (TFO) connections. + * When this flag is set to a value greater than 0, the option TCP_FASTOPEN is enabled on + * the socket, with a queue length of the specified size + * (see `details in RFC7413 `_). + * When this flag is set to 0, the option TCP_FASTOPEN is disabled on the socket. + * When this flag is not set (default), the socket is not modified, + * i.e. the option is neither enabled nor disabled. + * + * On Linux, the net.ipv4.tcp_fastopen kernel parameter must include flag 0x2 to enable + * TCP_FASTOPEN. + * See `ip-sysctl.txt `_. 
+ * + * On macOS, only values of 0, 1, and unset are valid; other values may result in an error. + * To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter. + */ + 'tcp_fast_open_queue_length'?: (_google_protobuf_UInt32Value | null); + /** + * Additional socket options that may not be present in Envoy source code or + * precompiled binaries. The socket options can be updated for a listener when + * :ref:`enable_reuse_port ` + * is `true`. Otherwise, if socket options change during a listener update the update will be rejected + * to make it clear that the options were not updated. + */ + 'socket_options'?: (_envoy_config_core_v3_SocketOption)[]; + /** + * The timeout to wait for all listener filters to complete operation. If the timeout is reached, + * the accepted socket is closed without a connection being created unless + * ``continue_on_listener_filters_timeout`` is set to true. Specify 0 to disable the + * timeout. If not specified, a default timeout of 15s is used. + */ + 'listener_filters_timeout'?: (_google_protobuf_Duration | null); + /** + * Specifies the intended direction of the traffic relative to the local Envoy. + * This property is required on Windows for listeners using the original destination filter, + * see :ref:`Original Destination `. + */ + 'traffic_direction'?: (_envoy_config_core_v3_TrafficDirection); + /** + * Whether a connection should be created when listener filters timeout. Default is false. + * + * .. attention:: + * + * Some listener filters, such as :ref:`Proxy Protocol filter + * `, should not be used with this option. It will cause + * unexpected behavior when a connection is created. + */ + 'continue_on_listener_filters_timeout'?: (boolean); + /** + * If the protocol in the listener socket address in :ref:`protocol + * ` is :ref:`UDP + * `, this field specifies UDP + * listener specific configuration. + */ + 'udp_listener_config'?: (_envoy_config_listener_v3_UdpListenerConfig | null); + /** + * Used to represent an API listener, which is used in non-proxy clients. The type of API + * exposed to the non-proxy application depends on the type of API listener. + * When this field is set, no other field except for :ref:`name` + * should be set. + * + * .. note:: + * + * Currently only one ApiListener can be installed; and it can only be done via bootstrap config, + * not LDS. + * + * [#next-major-version: In the v3 API, instead of this messy approach where the socket + * listener fields are directly in the top-level Listener message and the API listener types + * are in the ApiListener message, the socket listener messages should be in their own message, + * and the top-level Listener should essentially be a oneof that selects between the + * socket listener and the various types of API listener. That way, a given Listener message + * can structurally only contain the fields of the relevant type.] + */ + 'api_listener'?: (_envoy_config_listener_v3_ApiListener | null); + /** + * The listener's connection balancer configuration, currently only applicable to TCP listeners. + * If no configuration is specified, Envoy will not attempt to balance active connections between + * worker threads. 
+ * + * In the scenario that the listener X redirects all the connections to the listeners Y1 and Y2 + * by setting :ref:`use_original_dst ` in X + * and :ref:`bind_to_port ` to false in Y1 and Y2, + * it is recommended to disable the balance config in listener X to avoid the cost of balancing, and + * enable the balance config in Y1 and Y2 to balance the connections among the workers. + */ + 'connection_balance_config'?: (_envoy_config_listener_v3_Listener_ConnectionBalanceConfig | null); + /** + * Deprecated. Use ``enable_reuse_port`` instead. + * @deprecated + */ + 'reuse_port'?: (boolean); + /** + * Configuration for :ref:`access logs ` + * emitted by this listener. + */ + 'access_log'?: (_envoy_config_accesslog_v3_AccessLog)[]; + /** + * The maximum length a tcp listener's pending connections queue can grow to. If no value is + * provided net.core.somaxconn will be used on Linux and 128 otherwise. + */ + 'tcp_backlog_size'?: (_google_protobuf_UInt32Value | null); + /** + * The default filter chain if none of the filter chain matches. If no default filter chain is supplied, + * the connection will be closed. The filter chain match is ignored in this field. + */ + 'default_filter_chain'?: (_envoy_config_listener_v3_FilterChain | null); + /** + * Whether the listener should bind to the port. A listener that doesn't + * bind can only receive connections redirected from other listeners that set + * :ref:`use_original_dst ` + * to true. Default is true. + */ + 'bind_to_port'?: (_google_protobuf_BoolValue | null); + /** + * Used to represent an internal listener which does not listen on OSI L4 address but can be used by the + * :ref:`envoy cluster ` to create a user space connection to. + * The internal listener acts as a TCP listener. It supports listener filters and network filter chains. + * Upstream clusters refer to the internal listeners by their :ref:`name + * `. :ref:`Address + * ` must not be set on the internal listeners. + * + * There are some limitations that are derived from the implementation. The known limitations include: + * + * * :ref:`ConnectionBalanceConfig ` is not + * allowed because both the cluster connection and the listener connection must be owned by the same dispatcher. + * * :ref:`tcp_backlog_size ` + * * :ref:`freebind ` + * * :ref:`transparent ` + */ + 'internal_listener'?: (_envoy_config_listener_v3_Listener_InternalListenerConfig | null); + /** + * Optional prefix to use on listener stats. If empty, the stats will be rooted at + * ``listener.
.``. If non-empty, stats will be rooted at + * ``listener..``. + */ + 'stat_prefix'?: (string); + /** + * When this flag is set to true, listeners set the ``SO_REUSEPORT`` socket option and + * create one socket for each worker thread. This makes inbound connections + * distribute among worker threads roughly evenly in cases where there are a high number + * of connections. When this flag is set to false, all worker threads share one socket. This field + * defaults to true. The change of field will be rejected during an listener update when the + * runtime flag ``envoy.reloadable_features.enable_update_listener_socket_options`` is enabled. + * Otherwise, the update of this field will be ignored quietly. + * + * .. attention:: + * + * Although this field defaults to true, it has different behavior on different platforms. See + * the following text for more information. + * + * * On Linux, reuse_port is respected for both TCP and UDP listeners. It also works correctly + * with hot restart. + * * On macOS, reuse_port for TCP does not do what it does on Linux. Instead of load balancing, + * the last socket wins and receives all connections/packets. For TCP, reuse_port is force + * disabled and the user is warned. For UDP, it is enabled, but only one worker will receive + * packets. For QUIC/H3, SW routing will send packets to other workers. For "raw" UDP, only + * a single worker will currently receive packets. + * * On Windows, reuse_port for TCP has undefined behavior. It is force disabled and the user + * is warned similar to macOS. It is left enabled for UDP with undefined behavior currently. + */ + 'enable_reuse_port'?: (_google_protobuf_BoolValue | null); + /** + * Enable MPTCP (multi-path TCP) on this listener. Clients will be allowed to establish + * MPTCP connections. Non-MPTCP clients will fall back to regular TCP. + */ + 'enable_mptcp'?: (boolean); + /** + * Whether the listener should limit connections based upon the value of + * :ref:`global_downstream_max_connections `. + */ + 'ignore_global_conn_limit'?: (boolean); + /** + * :ref:`Matcher API ` resolving the filter chain name from the + * network properties. This matcher is used as a replacement for the filter chain match condition + * :ref:`filter_chain_match + * `. If specified, all + * :ref:`filter_chains ` must have a + * non-empty and unique :ref:`name ` field + * and not specify :ref:`filter_chain_match + * ` field. + * + * .. note:: + * + * Once matched, each connection is permanently bound to its filter chain. + * If the matcher changes but the filter chain remains the same, the + * connections bound to the filter chain are not drained. If, however, the + * filter chain is removed or structurally modified, then the drain for its + * connections is initiated. + */ + 'filter_chain_matcher'?: (_xds_type_matcher_v3_Matcher | null); + /** + * The additional addresses the listener should listen on. The addresses must be unique across all + * listeners. Multiple addresses with port 0 can be supplied. When using multiple addresses in a single listener, + * all addresses use the same protocol, and multiple internal addresses are not supported. + */ + 'additional_addresses'?: (_envoy_config_listener_v3_AdditionalAddress)[]; + /** + * The exclusive listener type and the corresponding config. + */ + 'listener_specifier'?: "internal_listener"; +} + +/** + * [#next-free-field: 34] + */ +export interface Listener__Output { + /** + * The unique name by which this listener is known. 
If no name is provided, + * Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically + * updated or removed via :ref:`LDS ` a unique name must be provided. + */ + 'name': (string); + /** + * The address that the listener should listen on. In general, the address must be unique, though + * that is governed by the bind rules of the OS. E.g., multiple listeners can listen on port 0 on + * Linux as the actual port will be allocated by the OS. + * Required unless ``api_listener`` or ``listener_specifier`` is populated. + */ + 'address': (_envoy_config_core_v3_Address__Output | null); + /** + * A list of filter chains to consider for this listener. The + * :ref:`FilterChain ` with the most specific + * :ref:`FilterChainMatch ` criteria is used on a + * connection. + * + * Example using SNI for filter chain selection can be found in the + * :ref:`FAQ entry `. + */ + 'filter_chains': (_envoy_config_listener_v3_FilterChain__Output)[]; + /** + * If a connection is redirected using ``iptables``, the port on which the proxy + * receives it might be different from the original destination address. When this flag is set to + * true, the listener hands off redirected connections to the listener associated with the + * original destination address. If there is no listener associated with the original destination + * address, the connection is handled by the listener that receives it. Defaults to false. + */ + 'use_original_dst': (_google_protobuf_BoolValue__Output | null); + /** + * Soft limit on size of the listener’s new connection read and write buffers. + * If unspecified, an implementation defined default is applied (1MiB). + */ + 'per_connection_buffer_limit_bytes': (_google_protobuf_UInt32Value__Output | null); + /** + * Listener metadata. + */ + 'metadata': (_envoy_config_core_v3_Metadata__Output | null); + /** + * [#not-implemented-hide:] + * @deprecated + */ + 'deprecated_v1': (_envoy_config_listener_v3_Listener_DeprecatedV1__Output | null); + /** + * The type of draining to perform at a listener-wide level. + */ + 'drain_type': (_envoy_config_listener_v3_Listener_DrainType__Output); + /** + * Listener filters have the opportunity to manipulate and augment the connection metadata that + * is used in connection filter chain matching, for example. These filters are run before any in + * :ref:`filter_chains `. Order matters as the + * filters are processed sequentially right after a socket has been accepted by the listener, and + * before a connection is created. + * UDP Listener filters can be specified when the protocol in the listener socket address in + * :ref:`protocol ` is :ref:`UDP + * `. + */ + 'listener_filters': (_envoy_config_listener_v3_ListenerFilter__Output)[]; + /** + * Whether the listener should be set as a transparent socket. + * When this flag is set to true, connections can be redirected to the listener using an + * ``iptables`` ``TPROXY`` target, in which case the original source and destination addresses and + * ports are preserved on accepted connections. This flag should be used in combination with + * :ref:`an original_dst ` :ref:`listener filter + * ` to mark the connections' local addresses as + * "restored." This can be used to hand off each redirected connection to another listener + * associated with the connection's destination address. Direct connections to the socket without + * using ``TPROXY`` cannot be distinguished from connections redirected using ``TPROXY`` and are + * therefore treated as if they were redirected. 
+ * When this flag is set to false, the listener's socket is explicitly reset as non-transparent. + * Setting this flag requires Envoy to run with the ``CAP_NET_ADMIN`` capability. + * When this flag is not set (default), the socket is not modified, i.e. the transparent option + * is neither set nor reset. + */ + 'transparent': (_google_protobuf_BoolValue__Output | null); + /** + * Whether the listener should set the ``IP_FREEBIND`` socket option. When this + * flag is set to true, listeners can be bound to an IP address that is not + * configured on the system running Envoy. When this flag is set to false, the + * option ``IP_FREEBIND`` is disabled on the socket. When this flag is not set + * (default), the socket is not modified, i.e. the option is neither enabled + * nor disabled. + */ + 'freebind': (_google_protobuf_BoolValue__Output | null); + /** + * Whether the listener should accept TCP Fast Open (TFO) connections. + * When this flag is set to a value greater than 0, the option TCP_FASTOPEN is enabled on + * the socket, with a queue length of the specified size + * (see `details in RFC7413 `_). + * When this flag is set to 0, the option TCP_FASTOPEN is disabled on the socket. + * When this flag is not set (default), the socket is not modified, + * i.e. the option is neither enabled nor disabled. + * + * On Linux, the net.ipv4.tcp_fastopen kernel parameter must include flag 0x2 to enable + * TCP_FASTOPEN. + * See `ip-sysctl.txt `_. + * + * On macOS, only values of 0, 1, and unset are valid; other values may result in an error. + * To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter. + */ + 'tcp_fast_open_queue_length': (_google_protobuf_UInt32Value__Output | null); + /** + * Additional socket options that may not be present in Envoy source code or + * precompiled binaries. The socket options can be updated for a listener when + * :ref:`enable_reuse_port ` + * is `true`. Otherwise, if socket options change during a listener update the update will be rejected + * to make it clear that the options were not updated. + */ + 'socket_options': (_envoy_config_core_v3_SocketOption__Output)[]; + /** + * The timeout to wait for all listener filters to complete operation. If the timeout is reached, + * the accepted socket is closed without a connection being created unless + * ``continue_on_listener_filters_timeout`` is set to true. Specify 0 to disable the + * timeout. If not specified, a default timeout of 15s is used. + */ + 'listener_filters_timeout': (_google_protobuf_Duration__Output | null); + /** + * Specifies the intended direction of the traffic relative to the local Envoy. + * This property is required on Windows for listeners using the original destination filter, + * see :ref:`Original Destination `. + */ + 'traffic_direction': (_envoy_config_core_v3_TrafficDirection__Output); + /** + * Whether a connection should be created when listener filters timeout. Default is false. + * + * .. attention:: + * + * Some listener filters, such as :ref:`Proxy Protocol filter + * `, should not be used with this option. It will cause + * unexpected behavior when a connection is created. + */ + 'continue_on_listener_filters_timeout': (boolean); + /** + * If the protocol in the listener socket address in :ref:`protocol + * ` is :ref:`UDP + * `, this field specifies UDP + * listener specific configuration. 
+ */ + 'udp_listener_config': (_envoy_config_listener_v3_UdpListenerConfig__Output | null); + /** + * Used to represent an API listener, which is used in non-proxy clients. The type of API + * exposed to the non-proxy application depends on the type of API listener. + * When this field is set, no other field except for :ref:`name` + * should be set. + * + * .. note:: + * + * Currently only one ApiListener can be installed; and it can only be done via bootstrap config, + * not LDS. + * + * [#next-major-version: In the v3 API, instead of this messy approach where the socket + * listener fields are directly in the top-level Listener message and the API listener types + * are in the ApiListener message, the socket listener messages should be in their own message, + * and the top-level Listener should essentially be a oneof that selects between the + * socket listener and the various types of API listener. That way, a given Listener message + * can structurally only contain the fields of the relevant type.] + */ + 'api_listener': (_envoy_config_listener_v3_ApiListener__Output | null); + /** + * The listener's connection balancer configuration, currently only applicable to TCP listeners. + * If no configuration is specified, Envoy will not attempt to balance active connections between + * worker threads. + * + * In the scenario that the listener X redirects all the connections to the listeners Y1 and Y2 + * by setting :ref:`use_original_dst ` in X + * and :ref:`bind_to_port ` to false in Y1 and Y2, + * it is recommended to disable the balance config in listener X to avoid the cost of balancing, and + * enable the balance config in Y1 and Y2 to balance the connections among the workers. + */ + 'connection_balance_config': (_envoy_config_listener_v3_Listener_ConnectionBalanceConfig__Output | null); + /** + * Deprecated. Use ``enable_reuse_port`` instead. + * @deprecated + */ + 'reuse_port': (boolean); + /** + * Configuration for :ref:`access logs ` + * emitted by this listener. + */ + 'access_log': (_envoy_config_accesslog_v3_AccessLog__Output)[]; + /** + * The maximum length a tcp listener's pending connections queue can grow to. If no value is + * provided net.core.somaxconn will be used on Linux and 128 otherwise. + */ + 'tcp_backlog_size': (_google_protobuf_UInt32Value__Output | null); + /** + * The default filter chain if none of the filter chain matches. If no default filter chain is supplied, + * the connection will be closed. The filter chain match is ignored in this field. + */ + 'default_filter_chain': (_envoy_config_listener_v3_FilterChain__Output | null); + /** + * Whether the listener should bind to the port. A listener that doesn't + * bind can only receive connections redirected from other listeners that set + * :ref:`use_original_dst ` + * to true. Default is true. + */ + 'bind_to_port': (_google_protobuf_BoolValue__Output | null); + /** + * Used to represent an internal listener which does not listen on OSI L4 address but can be used by the + * :ref:`envoy cluster ` to create a user space connection to. + * The internal listener acts as a TCP listener. It supports listener filters and network filter chains. + * Upstream clusters refer to the internal listeners by their :ref:`name + * `. :ref:`Address + * ` must not be set on the internal listeners. + * + * There are some limitations that are derived from the implementation. 
The known limitations include: + * + * * :ref:`ConnectionBalanceConfig ` is not + * allowed because both the cluster connection and the listener connection must be owned by the same dispatcher. + * * :ref:`tcp_backlog_size ` + * * :ref:`freebind ` + * * :ref:`transparent ` + */ + 'internal_listener'?: (_envoy_config_listener_v3_Listener_InternalListenerConfig__Output | null); + /** + * Optional prefix to use on listener stats. If empty, the stats will be rooted at + * ``listener.
.``. If non-empty, stats will be rooted at + * ``listener..``. + */ + 'stat_prefix': (string); + /** + * When this flag is set to true, listeners set the ``SO_REUSEPORT`` socket option and + * create one socket for each worker thread. This makes inbound connections + * distribute among worker threads roughly evenly in cases where there are a high number + * of connections. When this flag is set to false, all worker threads share one socket. This field + * defaults to true. The change of field will be rejected during an listener update when the + * runtime flag ``envoy.reloadable_features.enable_update_listener_socket_options`` is enabled. + * Otherwise, the update of this field will be ignored quietly. + * + * .. attention:: + * + * Although this field defaults to true, it has different behavior on different platforms. See + * the following text for more information. + * + * * On Linux, reuse_port is respected for both TCP and UDP listeners. It also works correctly + * with hot restart. + * * On macOS, reuse_port for TCP does not do what it does on Linux. Instead of load balancing, + * the last socket wins and receives all connections/packets. For TCP, reuse_port is force + * disabled and the user is warned. For UDP, it is enabled, but only one worker will receive + * packets. For QUIC/H3, SW routing will send packets to other workers. For "raw" UDP, only + * a single worker will currently receive packets. + * * On Windows, reuse_port for TCP has undefined behavior. It is force disabled and the user + * is warned similar to macOS. It is left enabled for UDP with undefined behavior currently. + */ + 'enable_reuse_port': (_google_protobuf_BoolValue__Output | null); + /** + * Enable MPTCP (multi-path TCP) on this listener. Clients will be allowed to establish + * MPTCP connections. Non-MPTCP clients will fall back to regular TCP. + */ + 'enable_mptcp': (boolean); + /** + * Whether the listener should limit connections based upon the value of + * :ref:`global_downstream_max_connections `. + */ + 'ignore_global_conn_limit': (boolean); + /** + * :ref:`Matcher API ` resolving the filter chain name from the + * network properties. This matcher is used as a replacement for the filter chain match condition + * :ref:`filter_chain_match + * `. If specified, all + * :ref:`filter_chains ` must have a + * non-empty and unique :ref:`name ` field + * and not specify :ref:`filter_chain_match + * ` field. + * + * .. note:: + * + * Once matched, each connection is permanently bound to its filter chain. + * If the matcher changes but the filter chain remains the same, the + * connections bound to the filter chain are not drained. If, however, the + * filter chain is removed or structurally modified, then the drain for its + * connections is initiated. + */ + 'filter_chain_matcher': (_xds_type_matcher_v3_Matcher__Output | null); + /** + * The additional addresses the listener should listen on. The addresses must be unique across all + * listeners. Multiple addresses with port 0 can be supplied. When using multiple addresses in a single listener, + * all addresses use the same protocol, and multiple internal addresses are not supported. + */ + 'additional_addresses': (_envoy_config_listener_v3_AdditionalAddress__Output)[]; + /** + * The exclusive listener type and the corresponding config. 
+ */ + 'listener_specifier': "internal_listener"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/ListenerCollection.ts b/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/ListenerCollection.ts new file mode 100644 index 000000000..a1e7a10ca --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/ListenerCollection.ts @@ -0,0 +1,19 @@ +// Original file: deps/envoy-api/envoy/config/listener/v3/listener.proto + +import type { CollectionEntry as _xds_core_v3_CollectionEntry, CollectionEntry__Output as _xds_core_v3_CollectionEntry__Output } from '../../../../xds/core/v3/CollectionEntry'; + +/** + * Listener list collections. Entries are ``Listener`` resources or references. + * [#not-implemented-hide:] + */ +export interface ListenerCollection { + 'entries'?: (_xds_core_v3_CollectionEntry)[]; +} + +/** + * Listener list collections. Entries are ``Listener`` resources or references. + * [#not-implemented-hide:] + */ +export interface ListenerCollection__Output { + 'entries': (_xds_core_v3_CollectionEntry__Output)[]; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/ListenerFilter.ts b/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/ListenerFilter.ts new file mode 100644 index 000000000..5844c4bbe --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/ListenerFilter.ts @@ -0,0 +1,63 @@ +// Original file: deps/envoy-api/envoy/config/listener/v3/listener_components.proto + +import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; +import type { ListenerFilterChainMatchPredicate as _envoy_config_listener_v3_ListenerFilterChainMatchPredicate, ListenerFilterChainMatchPredicate__Output as _envoy_config_listener_v3_ListenerFilterChainMatchPredicate__Output } from '../../../../envoy/config/listener/v3/ListenerFilterChainMatchPredicate'; +import type { ExtensionConfigSource as _envoy_config_core_v3_ExtensionConfigSource, ExtensionConfigSource__Output as _envoy_config_core_v3_ExtensionConfigSource__Output } from '../../../../envoy/config/core/v3/ExtensionConfigSource'; + +/** + * [#next-free-field: 6] + */ +export interface ListenerFilter { + /** + * The name of the filter configuration. + */ + 'name'?: (string); + /** + * Filter specific configuration which depends on the filter being + * instantiated. See the supported filters for further documentation. + * [#extension-category: envoy.filters.listener,envoy.filters.udp_listener] + */ + 'typed_config'?: (_google_protobuf_Any | null); + /** + * Optional match predicate used to disable the filter. The filter is enabled when this field is empty. + * See :ref:`ListenerFilterChainMatchPredicate ` + * for further examples. + */ + 'filter_disabled'?: (_envoy_config_listener_v3_ListenerFilterChainMatchPredicate | null); + /** + * Configuration source specifier for an extension configuration discovery + * service. In case of a failure and without the default configuration, the + * listener closes the connections. + */ + 'config_discovery'?: (_envoy_config_core_v3_ExtensionConfigSource | null); + 'config_type'?: "typed_config"|"config_discovery"; +} + +/** + * [#next-free-field: 6] + */ +export interface ListenerFilter__Output { + /** + * The name of the filter configuration. + */ + 'name': (string); + /** + * Filter specific configuration which depends on the filter being + * instantiated. See the supported filters for further documentation. 
+ * [#extension-category: envoy.filters.listener,envoy.filters.udp_listener] + */ + 'typed_config'?: (_google_protobuf_Any__Output | null); + /** + * Optional match predicate used to disable the filter. The filter is enabled when this field is empty. + * See :ref:`ListenerFilterChainMatchPredicate ` + * for further examples. + */ + 'filter_disabled': (_envoy_config_listener_v3_ListenerFilterChainMatchPredicate__Output | null); + /** + * Configuration source specifier for an extension configuration discovery + * service. In case of a failure and without the default configuration, the + * listener closes the connections. + */ + 'config_discovery'?: (_envoy_config_core_v3_ExtensionConfigSource__Output | null); + 'config_type': "typed_config"|"config_discovery"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/listener/ListenerFilterChainMatchPredicate.ts b/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/ListenerFilterChainMatchPredicate.ts similarity index 65% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/listener/ListenerFilterChainMatchPredicate.ts rename to packages/grpc-js-xds/src/generated/envoy/config/listener/v3/ListenerFilterChainMatchPredicate.ts index ac3ddfd2a..bb743a29d 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/listener/ListenerFilterChainMatchPredicate.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/ListenerFilterChainMatchPredicate.ts @@ -1,26 +1,26 @@ -// Original file: deps/envoy-api/envoy/api/v2/listener/listener_components.proto +// Original file: deps/envoy-api/envoy/config/listener/v3/listener_components.proto -import type { ListenerFilterChainMatchPredicate as _envoy_api_v2_listener_ListenerFilterChainMatchPredicate, ListenerFilterChainMatchPredicate__Output as _envoy_api_v2_listener_ListenerFilterChainMatchPredicate__Output } from '../../../../envoy/api/v2/listener/ListenerFilterChainMatchPredicate'; -import type { Int32Range as _envoy_type_Int32Range, Int32Range__Output as _envoy_type_Int32Range__Output } from '../../../../envoy/type/Int32Range'; +import type { ListenerFilterChainMatchPredicate as _envoy_config_listener_v3_ListenerFilterChainMatchPredicate, ListenerFilterChainMatchPredicate__Output as _envoy_config_listener_v3_ListenerFilterChainMatchPredicate__Output } from '../../../../envoy/config/listener/v3/ListenerFilterChainMatchPredicate'; +import type { Int32Range as _envoy_type_v3_Int32Range, Int32Range__Output as _envoy_type_v3_Int32Range__Output } from '../../../../envoy/type/v3/Int32Range'; /** * A set of match configurations used for logical operations. */ -export interface _envoy_api_v2_listener_ListenerFilterChainMatchPredicate_MatchSet { +export interface _envoy_config_listener_v3_ListenerFilterChainMatchPredicate_MatchSet { /** * The list of rules that make up the set. */ - 'rules'?: (_envoy_api_v2_listener_ListenerFilterChainMatchPredicate)[]; + 'rules'?: (_envoy_config_listener_v3_ListenerFilterChainMatchPredicate)[]; } /** * A set of match configurations used for logical operations. */ -export interface _envoy_api_v2_listener_ListenerFilterChainMatchPredicate_MatchSet__Output { +export interface _envoy_config_listener_v3_ListenerFilterChainMatchPredicate_MatchSet__Output { /** * The list of rules that make up the set. 
*/ - 'rules': (_envoy_api_v2_listener_ListenerFilterChainMatchPredicate__Output)[]; + 'rules': (_envoy_config_listener_v3_ListenerFilterChainMatchPredicate__Output)[]; } /** @@ -45,7 +45,7 @@ export interface _envoy_api_v2_listener_ListenerFilterChainMatchPredicate_MatchS * rules: * - destination_port_range: * start: 3306 - * end: 3306 + * end: 3307 * - destination_port_range: * start: 15000 * end: 15001 @@ -57,16 +57,16 @@ export interface ListenerFilterChainMatchPredicate { * A set that describes a logical OR. If any member of the set matches, the match configuration * matches. */ - 'or_match'?: (_envoy_api_v2_listener_ListenerFilterChainMatchPredicate_MatchSet); + 'or_match'?: (_envoy_config_listener_v3_ListenerFilterChainMatchPredicate_MatchSet | null); /** * A set that describes a logical AND. If all members of the set match, the match configuration * matches. */ - 'and_match'?: (_envoy_api_v2_listener_ListenerFilterChainMatchPredicate_MatchSet); + 'and_match'?: (_envoy_config_listener_v3_ListenerFilterChainMatchPredicate_MatchSet | null); /** * A negation match. The match configuration will match if the negated match condition matches. */ - 'not_match'?: (_envoy_api_v2_listener_ListenerFilterChainMatchPredicate); + 'not_match'?: (_envoy_config_listener_v3_ListenerFilterChainMatchPredicate | null); /** * The match configuration will always match. */ @@ -75,7 +75,7 @@ export interface ListenerFilterChainMatchPredicate { * Match destination port. Particularly, the match evaluation must use the recovered local port if * the owning listener filter is after :ref:`an original_dst listener filter `. */ - 'destination_port_range'?: (_envoy_type_Int32Range); + 'destination_port_range'?: (_envoy_type_v3_Int32Range | null); 'rule'?: "or_match"|"and_match"|"not_match"|"any_match"|"destination_port_range"; } @@ -101,7 +101,7 @@ export interface ListenerFilterChainMatchPredicate { * rules: * - destination_port_range: * start: 3306 - * end: 3306 + * end: 3307 * - destination_port_range: * start: 15000 * end: 15001 @@ -113,16 +113,16 @@ export interface ListenerFilterChainMatchPredicate__Output { * A set that describes a logical OR. If any member of the set matches, the match configuration * matches. */ - 'or_match'?: (_envoy_api_v2_listener_ListenerFilterChainMatchPredicate_MatchSet__Output); + 'or_match'?: (_envoy_config_listener_v3_ListenerFilterChainMatchPredicate_MatchSet__Output | null); /** * A set that describes a logical AND. If all members of the set match, the match configuration * matches. */ - 'and_match'?: (_envoy_api_v2_listener_ListenerFilterChainMatchPredicate_MatchSet__Output); + 'and_match'?: (_envoy_config_listener_v3_ListenerFilterChainMatchPredicate_MatchSet__Output | null); /** * A negation match. The match configuration will match if the negated match condition matches. */ - 'not_match'?: (_envoy_api_v2_listener_ListenerFilterChainMatchPredicate__Output); + 'not_match'?: (_envoy_config_listener_v3_ListenerFilterChainMatchPredicate__Output | null); /** * The match configuration will always match. */ @@ -131,6 +131,6 @@ export interface ListenerFilterChainMatchPredicate__Output { * Match destination port. Particularly, the match evaluation must use the recovered local port if * the owning listener filter is after :ref:`an original_dst listener filter `. 
*/ - 'destination_port_range'?: (_envoy_type_Int32Range__Output); + 'destination_port_range'?: (_envoy_type_v3_Int32Range__Output | null); 'rule': "or_match"|"and_match"|"not_match"|"any_match"|"destination_port_range"; } diff --git a/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/ListenerManager.ts b/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/ListenerManager.ts new file mode 100644 index 000000000..d27a10c68 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/ListenerManager.ts @@ -0,0 +1,18 @@ +// Original file: deps/envoy-api/envoy/config/listener/v3/listener.proto + + +/** + * A placeholder proto so that users can explicitly configure the standard + * Listener Manager via the bootstrap's :ref:`listener_manager `. + * [#not-implemented-hide:] + */ +export interface ListenerManager { +} + +/** + * A placeholder proto so that users can explicitly configure the standard + * Listener Manager via the bootstrap's :ref:`listener_manager `. + * [#not-implemented-hide:] + */ +export interface ListenerManager__Output { +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/QuicProtocolOptions.ts b/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/QuicProtocolOptions.ts new file mode 100644 index 000000000..e88ab26a9 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/QuicProtocolOptions.ts @@ -0,0 +1,129 @@ +// Original file: deps/envoy-api/envoy/config/listener/v3/quic_config.proto + +import type { QuicProtocolOptions as _envoy_config_core_v3_QuicProtocolOptions, QuicProtocolOptions__Output as _envoy_config_core_v3_QuicProtocolOptions__Output } from '../../../../envoy/config/core/v3/QuicProtocolOptions'; +import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../../google/protobuf/Duration'; +import type { RuntimeFeatureFlag as _envoy_config_core_v3_RuntimeFeatureFlag, RuntimeFeatureFlag__Output as _envoy_config_core_v3_RuntimeFeatureFlag__Output } from '../../../../envoy/config/core/v3/RuntimeFeatureFlag'; +import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; +import type { TypedExtensionConfig as _envoy_config_core_v3_TypedExtensionConfig, TypedExtensionConfig__Output as _envoy_config_core_v3_TypedExtensionConfig__Output } from '../../../../envoy/config/core/v3/TypedExtensionConfig'; + +/** + * Configuration specific to the UDP QUIC listener. + * [#next-free-field: 10] + */ +export interface QuicProtocolOptions { + 'quic_protocol_options'?: (_envoy_config_core_v3_QuicProtocolOptions | null); + /** + * Maximum number of milliseconds that connection will be alive when there is + * no network activity. + * + * If it is less than 1ms, Envoy will use 1ms. 300000ms if not specified. + */ + 'idle_timeout'?: (_google_protobuf_Duration | null); + /** + * Connection timeout in milliseconds before the crypto handshake is finished. + * + * If it is less than 5000ms, Envoy will use 5000ms. 20000ms if not specified. + */ + 'crypto_handshake_timeout'?: (_google_protobuf_Duration | null); + /** + * Runtime flag that controls whether the listener is enabled or not. If not specified, defaults + * to enabled. + */ + 'enabled'?: (_envoy_config_core_v3_RuntimeFeatureFlag | null); + /** + * A multiplier to number of connections which is used to determine how many packets to read per + * event loop. 
A reasonable number should allow the listener to process enough payload but not + * starve TCP and other UDP sockets and also prevent long event loop duration. + * The default value is 32. This means if there are N QUIC connections, the total number of + * packets to read in each read event will be 32 * N. + * The actual number of packets to read in total by the UDP listener is also + * bound by 6000, regardless of this field or how many connections there are. + */ + 'packets_to_read_to_connection_count_ratio'?: (_google_protobuf_UInt32Value | null); + /** + * Configure which implementation of ``quic::QuicCryptoClientStreamBase`` to be used for this listener. + * If not specified the :ref:`QUICHE default one configured by ` will be used. + * [#extension-category: envoy.quic.server.crypto_stream] + */ + 'crypto_stream_config'?: (_envoy_config_core_v3_TypedExtensionConfig | null); + /** + * Configure which implementation of ``quic::ProofSource`` to be used for this listener. + * If not specified the :ref:`default one configured by ` will be used. + * [#extension-category: envoy.quic.proof_source] + */ + 'proof_source_config'?: (_envoy_config_core_v3_TypedExtensionConfig | null); + /** + * Config which implementation of ``quic::ConnectionIdGeneratorInterface`` to be used for this listener. + * If not specified the :ref:`default one configured by ` will be used. + * [#extension-category: envoy.quic.connection_id_generator] + */ + 'connection_id_generator_config'?: (_envoy_config_core_v3_TypedExtensionConfig | null); + /** + * Configure the server's preferred address to advertise so that client can migrate to it. See :ref:`example ` which configures a pair of v4 and v6 preferred addresses. + * The current QUICHE implementation will advertise only one of the preferred IPv4 and IPv6 addresses based on the address family the client initially connects with, and only if the client is also QUICHE-based. + * If not specified, Envoy will not advertise any server's preferred address. + * [#extension-category: envoy.quic.server_preferred_address] + */ + 'server_preferred_address_config'?: (_envoy_config_core_v3_TypedExtensionConfig | null); +} + +/** + * Configuration specific to the UDP QUIC listener. + * [#next-free-field: 10] + */ +export interface QuicProtocolOptions__Output { + 'quic_protocol_options': (_envoy_config_core_v3_QuicProtocolOptions__Output | null); + /** + * Maximum number of milliseconds that connection will be alive when there is + * no network activity. + * + * If it is less than 1ms, Envoy will use 1ms. 300000ms if not specified. + */ + 'idle_timeout': (_google_protobuf_Duration__Output | null); + /** + * Connection timeout in milliseconds before the crypto handshake is finished. + * + * If it is less than 5000ms, Envoy will use 5000ms. 20000ms if not specified. + */ + 'crypto_handshake_timeout': (_google_protobuf_Duration__Output | null); + /** + * Runtime flag that controls whether the listener is enabled or not. If not specified, defaults + * to enabled. + */ + 'enabled': (_envoy_config_core_v3_RuntimeFeatureFlag__Output | null); + /** + * A multiplier to number of connections which is used to determine how many packets to read per + * event loop. A reasonable number should allow the listener to process enough payload but not + * starve TCP and other UDP sockets and also prevent long event loop duration. + * The default value is 32. This means if there are N QUIC connections, the total number of + * packets to read in each read event will be 32 * N. 
+ * The actual number of packets to read in total by the UDP listener is also + * bound by 6000, regardless of this field or how many connections there are. + */ + 'packets_to_read_to_connection_count_ratio': (_google_protobuf_UInt32Value__Output | null); + /** + * Configure which implementation of ``quic::QuicCryptoClientStreamBase`` to be used for this listener. + * If not specified the :ref:`QUICHE default one configured by ` will be used. + * [#extension-category: envoy.quic.server.crypto_stream] + */ + 'crypto_stream_config': (_envoy_config_core_v3_TypedExtensionConfig__Output | null); + /** + * Configure which implementation of ``quic::ProofSource`` to be used for this listener. + * If not specified the :ref:`default one configured by ` will be used. + * [#extension-category: envoy.quic.proof_source] + */ + 'proof_source_config': (_envoy_config_core_v3_TypedExtensionConfig__Output | null); + /** + * Config which implementation of ``quic::ConnectionIdGeneratorInterface`` to be used for this listener. + * If not specified the :ref:`default one configured by ` will be used. + * [#extension-category: envoy.quic.connection_id_generator] + */ + 'connection_id_generator_config': (_envoy_config_core_v3_TypedExtensionConfig__Output | null); + /** + * Configure the server's preferred address to advertise so that client can migrate to it. See :ref:`example ` which configures a pair of v4 and v6 preferred addresses. + * The current QUICHE implementation will advertise only one of the preferred IPv4 and IPv6 addresses based on the address family the client initially connects with, and only if the client is also QUICHE-based. + * If not specified, Envoy will not advertise any server's preferred address. + * [#extension-category: envoy.quic.server_preferred_address] + */ + 'server_preferred_address_config': (_envoy_config_core_v3_TypedExtensionConfig__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/UdpListenerConfig.ts b/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/UdpListenerConfig.ts new file mode 100644 index 000000000..63f9666e0 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/UdpListenerConfig.ts @@ -0,0 +1,59 @@ +// Original file: deps/envoy-api/envoy/config/listener/v3/udp_listener_config.proto + +import type { UdpSocketConfig as _envoy_config_core_v3_UdpSocketConfig, UdpSocketConfig__Output as _envoy_config_core_v3_UdpSocketConfig__Output } from '../../../../envoy/config/core/v3/UdpSocketConfig'; +import type { QuicProtocolOptions as _envoy_config_listener_v3_QuicProtocolOptions, QuicProtocolOptions__Output as _envoy_config_listener_v3_QuicProtocolOptions__Output } from '../../../../envoy/config/listener/v3/QuicProtocolOptions'; +import type { TypedExtensionConfig as _envoy_config_core_v3_TypedExtensionConfig, TypedExtensionConfig__Output as _envoy_config_core_v3_TypedExtensionConfig__Output } from '../../../../envoy/config/core/v3/TypedExtensionConfig'; + +/** + * [#next-free-field: 9] + */ +export interface UdpListenerConfig { + /** + * UDP socket configuration for the listener. The default for + * :ref:`prefer_gro ` is false for + * listener sockets. If receiving a large amount of datagrams from a small number of sources, it + * may be worthwhile to enable this option after performance testing. + */ + 'downstream_socket_config'?: (_envoy_config_core_v3_UdpSocketConfig | null); + /** + * Configuration for QUIC protocol. If empty, QUIC will not be enabled on this listener. 
Set + * to the default object to enable QUIC without modifying any additional options. + */ + 'quic_options'?: (_envoy_config_listener_v3_QuicProtocolOptions | null); + /** + * Configuration for the UDP packet writer. If empty, HTTP/3 will use GSO if available + * (:ref:`UdpDefaultWriterFactory `) + * or the default kernel sendmsg if not, + * (:ref:`UdpDefaultWriterFactory `) + * and raw UDP will use kernel sendmsg. + * [#extension-category: envoy.udp_packet_writer] + */ + 'udp_packet_packet_writer_config'?: (_envoy_config_core_v3_TypedExtensionConfig | null); +} + +/** + * [#next-free-field: 9] + */ +export interface UdpListenerConfig__Output { + /** + * UDP socket configuration for the listener. The default for + * :ref:`prefer_gro ` is false for + * listener sockets. If receiving a large amount of datagrams from a small number of sources, it + * may be worthwhile to enable this option after performance testing. + */ + 'downstream_socket_config': (_envoy_config_core_v3_UdpSocketConfig__Output | null); + /** + * Configuration for QUIC protocol. If empty, QUIC will not be enabled on this listener. Set + * to the default object to enable QUIC without modifying any additional options. + */ + 'quic_options': (_envoy_config_listener_v3_QuicProtocolOptions__Output | null); + /** + * Configuration for the UDP packet writer. If empty, HTTP/3 will use GSO if available + * (:ref:`UdpDefaultWriterFactory `) + * or the default kernel sendmsg if not, + * (:ref:`UdpDefaultWriterFactory `) + * and raw UDP will use kernel sendmsg. + * [#extension-category: envoy.udp_packet_writer] + */ + 'udp_packet_packet_writer_config': (_envoy_config_core_v3_TypedExtensionConfig__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/ValidationListenerManager.ts b/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/ValidationListenerManager.ts new file mode 100644 index 000000000..3b6ccc3a6 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/listener/v3/ValidationListenerManager.ts @@ -0,0 +1,18 @@ +// Original file: deps/envoy-api/envoy/config/listener/v3/listener.proto + + +/** + * A placeholder proto so that users can explicitly configure the standard + * Validation Listener Manager via the bootstrap's :ref:`listener_manager `. + * [#not-implemented-hide:] + */ +export interface ValidationListenerManager { +} + +/** + * A placeholder proto so that users can explicitly configure the standard + * Validation Listener Manager via the bootstrap's :ref:`listener_manager `. + * [#not-implemented-hide:] + */ +export interface ValidationListenerManager__Output { +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/route/v3/ClusterSpecifierPlugin.ts b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/ClusterSpecifierPlugin.ts new file mode 100644 index 000000000..3742eeb6b --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/ClusterSpecifierPlugin.ts @@ -0,0 +1,39 @@ +// Original file: deps/envoy-api/envoy/config/route/v3/route_components.proto + +import type { TypedExtensionConfig as _envoy_config_core_v3_TypedExtensionConfig, TypedExtensionConfig__Output as _envoy_config_core_v3_TypedExtensionConfig__Output } from '../../../../envoy/config/core/v3/TypedExtensionConfig'; + +/** + * Configuration for a cluster specifier plugin. + */ +export interface ClusterSpecifierPlugin { + /** + * The name of the plugin and its opaque configuration. 
+ */ + 'extension'?: (_envoy_config_core_v3_TypedExtensionConfig | null); + /** + * If is_optional is not set or is set to false and the plugin defined by this message is not a + * supported type, the containing resource is NACKed. If is_optional is set to true, the resource + * would not be NACKed for this reason. In this case, routes referencing this plugin's name would + * not be treated as an illegal configuration, but would result in a failure if the route is + * selected. + */ + 'is_optional'?: (boolean); +} + +/** + * Configuration for a cluster specifier plugin. + */ +export interface ClusterSpecifierPlugin__Output { + /** + * The name of the plugin and its opaque configuration. + */ + 'extension': (_envoy_config_core_v3_TypedExtensionConfig__Output | null); + /** + * If is_optional is not set or is set to false and the plugin defined by this message is not a + * supported type, the containing resource is NACKed. If is_optional is set to true, the resource + * would not be NACKed for this reason. In this case, routes referencing this plugin's name would + * not be treated as an illegal configuration, but would result in a failure if the route is + * selected. + */ + 'is_optional': (boolean); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/route/v3/CorsPolicy.ts b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/CorsPolicy.ts new file mode 100644 index 000000000..8a74b0658 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/CorsPolicy.ts @@ -0,0 +1,143 @@ +// Original file: deps/envoy-api/envoy/config/route/v3/route_components.proto + +import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../../google/protobuf/BoolValue'; +import type { RuntimeFractionalPercent as _envoy_config_core_v3_RuntimeFractionalPercent, RuntimeFractionalPercent__Output as _envoy_config_core_v3_RuntimeFractionalPercent__Output } from '../../../../envoy/config/core/v3/RuntimeFractionalPercent'; +import type { StringMatcher as _envoy_type_matcher_v3_StringMatcher, StringMatcher__Output as _envoy_type_matcher_v3_StringMatcher__Output } from '../../../../envoy/type/matcher/v3/StringMatcher'; + +/** + * Cors policy configuration. + * + * .. attention:: + * + * This message has been deprecated. Please use + * :ref:`CorsPolicy in filter extension ` + * as as alternative. + * + * [#next-free-field: 13] + */ +export interface CorsPolicy { + /** + * Specifies the content for the ``access-control-allow-methods`` header. + */ + 'allow_methods'?: (string); + /** + * Specifies the content for the ``access-control-allow-headers`` header. + */ + 'allow_headers'?: (string); + /** + * Specifies the content for the ``access-control-expose-headers`` header. + */ + 'expose_headers'?: (string); + /** + * Specifies the content for the ``access-control-max-age`` header. + */ + 'max_age'?: (string); + /** + * Specifies whether the resource allows credentials. + */ + 'allow_credentials'?: (_google_protobuf_BoolValue | null); + /** + * Specifies the % of requests for which the CORS filter is enabled. + * + * If neither ``enabled``, ``filter_enabled``, nor ``shadow_enabled`` are specified, the CORS + * filter will be enabled for 100% of the requests. + * + * If :ref:`runtime_key ` is + * specified, Envoy will lookup the runtime key to get the percentage of requests to filter. 
+ */ + 'filter_enabled'?: (_envoy_config_core_v3_RuntimeFractionalPercent | null); + /** + * Specifies the % of requests for which the CORS policies will be evaluated and tracked, but not + * enforced. + * + * This field is intended to be used when ``filter_enabled`` and ``enabled`` are off. One of those + * fields have to explicitly disable the filter in order for this setting to take effect. + * + * If :ref:`runtime_key ` is specified, + * Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate + * and track the request's ``Origin`` to determine if it's valid but will not enforce any policies. + */ + 'shadow_enabled'?: (_envoy_config_core_v3_RuntimeFractionalPercent | null); + /** + * Specifies string patterns that match allowed origins. An origin is allowed if any of the + * string matchers match. + */ + 'allow_origin_string_match'?: (_envoy_type_matcher_v3_StringMatcher)[]; + /** + * Specify whether allow requests whose target server's IP address is more private than that from + * which the request initiator was fetched. + * + * More details refer to https://developer.chrome.com/blog/private-network-access-preflight. + */ + 'allow_private_network_access'?: (_google_protobuf_BoolValue | null); + 'enabled_specifier'?: "filter_enabled"; +} + +/** + * Cors policy configuration. + * + * .. attention:: + * + * This message has been deprecated. Please use + * :ref:`CorsPolicy in filter extension ` + * as as alternative. + * + * [#next-free-field: 13] + */ +export interface CorsPolicy__Output { + /** + * Specifies the content for the ``access-control-allow-methods`` header. + */ + 'allow_methods': (string); + /** + * Specifies the content for the ``access-control-allow-headers`` header. + */ + 'allow_headers': (string); + /** + * Specifies the content for the ``access-control-expose-headers`` header. + */ + 'expose_headers': (string); + /** + * Specifies the content for the ``access-control-max-age`` header. + */ + 'max_age': (string); + /** + * Specifies whether the resource allows credentials. + */ + 'allow_credentials': (_google_protobuf_BoolValue__Output | null); + /** + * Specifies the % of requests for which the CORS filter is enabled. + * + * If neither ``enabled``, ``filter_enabled``, nor ``shadow_enabled`` are specified, the CORS + * filter will be enabled for 100% of the requests. + * + * If :ref:`runtime_key ` is + * specified, Envoy will lookup the runtime key to get the percentage of requests to filter. + */ + 'filter_enabled'?: (_envoy_config_core_v3_RuntimeFractionalPercent__Output | null); + /** + * Specifies the % of requests for which the CORS policies will be evaluated and tracked, but not + * enforced. + * + * This field is intended to be used when ``filter_enabled`` and ``enabled`` are off. One of those + * fields have to explicitly disable the filter in order for this setting to take effect. + * + * If :ref:`runtime_key ` is specified, + * Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate + * and track the request's ``Origin`` to determine if it's valid but will not enforce any policies. + */ + 'shadow_enabled': (_envoy_config_core_v3_RuntimeFractionalPercent__Output | null); + /** + * Specifies string patterns that match allowed origins. An origin is allowed if any of the + * string matchers match. 
+ */ + 'allow_origin_string_match': (_envoy_type_matcher_v3_StringMatcher__Output)[]; + /** + * Specify whether allow requests whose target server's IP address is more private than that from + * which the request initiator was fetched. + * + * More details refer to https://developer.chrome.com/blog/private-network-access-preflight. + */ + 'allow_private_network_access': (_google_protobuf_BoolValue__Output | null); + 'enabled_specifier': "filter_enabled"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/Decorator.ts b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/Decorator.ts similarity index 87% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/route/Decorator.ts rename to packages/grpc-js-xds/src/generated/envoy/config/route/v3/Decorator.ts index 68c91327d..fa8bef1d8 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/Decorator.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/Decorator.ts @@ -1,4 +1,4 @@ -// Original file: deps/envoy-api/envoy/api/v2/route/route_components.proto +// Original file: deps/envoy-api/envoy/config/route/v3/route_components.proto import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../../google/protobuf/BoolValue'; @@ -17,7 +17,7 @@ export interface Decorator { /** * Whether the decorated details should be propagated to the other party. The default is true. */ - 'propagate'?: (_google_protobuf_BoolValue); + 'propagate'?: (_google_protobuf_BoolValue | null); } export interface Decorator__Output { @@ -35,5 +35,5 @@ export interface Decorator__Output { /** * Whether the decorated details should be propagated to the other party. The default is true. */ - 'propagate'?: (_google_protobuf_BoolValue__Output); + 'propagate': (_google_protobuf_BoolValue__Output | null); } diff --git a/packages/grpc-js-xds/src/generated/envoy/config/route/v3/DirectResponseAction.ts b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/DirectResponseAction.ts new file mode 100644 index 000000000..7c0f9ee67 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/DirectResponseAction.ts @@ -0,0 +1,39 @@ +// Original file: deps/envoy-api/envoy/config/route/v3/route_components.proto + +import type { DataSource as _envoy_config_core_v3_DataSource, DataSource__Output as _envoy_config_core_v3_DataSource__Output } from '../../../../envoy/config/core/v3/DataSource'; + +export interface DirectResponseAction { + /** + * Specifies the HTTP response status to be returned. + */ + 'status'?: (number); + /** + * Specifies the content of the response body. If this setting is omitted, + * no body is included in the generated response. + * + * .. note:: + * + * Headers can be specified using ``response_headers_to_add`` in the enclosing + * :ref:`envoy_v3_api_msg_config.route.v3.Route`, :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` or + * :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`. + */ + 'body'?: (_envoy_config_core_v3_DataSource | null); +} + +export interface DirectResponseAction__Output { + /** + * Specifies the HTTP response status to be returned. + */ + 'status': (number); + /** + * Specifies the content of the response body. If this setting is omitted, + * no body is included in the generated response. + * + * .. 
note:: + * + * Headers can be specified using ``response_headers_to_add`` in the enclosing + * :ref:`envoy_v3_api_msg_config.route.v3.Route`, :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` or + * :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`. + */ + 'body': (_envoy_config_core_v3_DataSource__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/FilterAction.ts b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/FilterAction.ts similarity index 62% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/route/FilterAction.ts rename to packages/grpc-js-xds/src/generated/envoy/config/route/v3/FilterAction.ts index a41f3f417..2765c0799 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/FilterAction.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/FilterAction.ts @@ -1,4 +1,4 @@ -// Original file: deps/envoy-api/envoy/api/v2/route/route_components.proto +// Original file: deps/envoy-api/envoy/config/route/v3/route_components.proto import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; @@ -6,12 +6,12 @@ import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__ * A filter-defined action type. */ export interface FilterAction { - 'action'?: (_google_protobuf_Any); + 'action'?: (_google_protobuf_Any | null); } /** * A filter-defined action type. */ export interface FilterAction__Output { - 'action'?: (_google_protobuf_Any__Output); + 'action': (_google_protobuf_Any__Output | null); } diff --git a/packages/grpc-js-xds/src/generated/envoy/config/route/v3/FilterConfig.ts b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/FilterConfig.ts new file mode 100644 index 000000000..e5a3b5778 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/FilterConfig.ts @@ -0,0 +1,79 @@ +// Original file: deps/envoy-api/envoy/config/route/v3/route_components.proto + +import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; + +/** + * A simple wrapper for an HTTP filter config. This is intended to be used as a wrapper for the + * map value in + * :ref:`VirtualHost.typed_per_filter_config`, + * :ref:`Route.typed_per_filter_config`, + * or :ref:`WeightedCluster.ClusterWeight.typed_per_filter_config` + * to add additional flags to the filter. + */ +export interface FilterConfig { + /** + * The filter config. + */ + 'config'?: (_google_protobuf_Any | null); + /** + * If true, the filter is optional, meaning that if the client does + * not support the specified filter, it may ignore the map entry rather + * than rejecting the config. + */ + 'is_optional'?: (boolean); + /** + * If true, the filter is disabled in the route or virtual host and the ``config`` field is ignored. + * + * .. note:: + * + * This field will take effect when the request arrive and filter chain is created for the request. + * If initial route is selected for the request and a filter is disabled in the initial route, then + * the filter will not be added to the filter chain. + * And if the request is mutated later and re-match to another route, the disabled filter by the + * initial route will not be added back to the filter chain because the filter chain is already + * created and it is too late to change the chain. + * + * This field only make sense for the downstream HTTP filters for now. 
+ * + * [#not-implemented-hide:] + */ + 'disabled'?: (boolean); +} + +/** + * A simple wrapper for an HTTP filter config. This is intended to be used as a wrapper for the + * map value in + * :ref:`VirtualHost.typed_per_filter_config`, + * :ref:`Route.typed_per_filter_config`, + * or :ref:`WeightedCluster.ClusterWeight.typed_per_filter_config` + * to add additional flags to the filter. + */ +export interface FilterConfig__Output { + /** + * The filter config. + */ + 'config': (_google_protobuf_Any__Output | null); + /** + * If true, the filter is optional, meaning that if the client does + * not support the specified filter, it may ignore the map entry rather + * than rejecting the config. + */ + 'is_optional': (boolean); + /** + * If true, the filter is disabled in the route or virtual host and the ``config`` field is ignored. + * + * .. note:: + * + * This field will take effect when the request arrive and filter chain is created for the request. + * If initial route is selected for the request and a filter is disabled in the initial route, then + * the filter will not be added to the filter chain. + * And if the request is mutated later and re-match to another route, the disabled filter by the + * initial route will not be added back to the filter chain because the filter chain is already + * created and it is too late to change the chain. + * + * This field only make sense for the downstream HTTP filters for now. + * + * [#not-implemented-hide:] + */ + 'disabled': (boolean); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/route/v3/HeaderMatcher.ts b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/HeaderMatcher.ts new file mode 100644 index 000000000..b5b085ae7 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/HeaderMatcher.ts @@ -0,0 +1,303 @@ +// Original file: deps/envoy-api/envoy/config/route/v3/route_components.proto + +import type { Int64Range as _envoy_type_v3_Int64Range, Int64Range__Output as _envoy_type_v3_Int64Range__Output } from '../../../../envoy/type/v3/Int64Range'; +import type { RegexMatcher as _envoy_type_matcher_v3_RegexMatcher, RegexMatcher__Output as _envoy_type_matcher_v3_RegexMatcher__Output } from '../../../../envoy/type/matcher/v3/RegexMatcher'; +import type { StringMatcher as _envoy_type_matcher_v3_StringMatcher, StringMatcher__Output as _envoy_type_matcher_v3_StringMatcher__Output } from '../../../../envoy/type/matcher/v3/StringMatcher'; + +/** + * .. attention:: + * + * Internally, Envoy always uses the HTTP/2 ``:authority`` header to represent the HTTP/1 ``Host`` + * header. Thus, if attempting to match on ``Host``, match on ``:authority`` instead. + * + * .. attention:: + * + * To route on HTTP method, use the special HTTP/2 ``:method`` header. This works for both + * HTTP/1 and HTTP/2 as Envoy normalizes headers. E.g., + * + * .. code-block:: json + * + * { + * "name": ":method", + * "string_match": { + * "exact": "POST" + * } + * } + * + * .. attention:: + * In the absence of any header match specifier, match will default to :ref:`present_match + * `. i.e, a request that has the :ref:`name + * ` header will match, regardless of the header's + * value. + * + * [#next-major-version: HeaderMatcher should be refactored to use StringMatcher.] + * [#next-free-field: 15] + */ +export interface HeaderMatcher { + /** + * Specifies the name of the header in the request. + */ + 'name'?: (string); + /** + * If specified, header match will be performed based on the value of the header. + * This field is deprecated. 
Please use :ref:`string_match `. + * @deprecated + */ + 'exact_match'?: (string); + /** + * If specified, header match will be performed based on range. + * The rule will match if the request header value is within this range. + * The entire request header value must represent an integer in base 10 notation: consisting of + * an optional plus or minus sign followed by a sequence of digits. The rule will not match if + * the header value does not represent an integer. Match will fail for empty values, floating + * point numbers or if only a subsequence of the header value is an integer. + * + * Examples: + * + * * For range [-10,0), route will match for header value -1, but not for 0, ``somestring``, 10.9, + * ``-1somestring`` + */ + 'range_match'?: (_envoy_type_v3_Int64Range | null); + /** + * If specified as true, header match will be performed based on whether the header is in the + * request. If specified as false, header match will be performed based on whether the header is absent. + */ + 'present_match'?: (boolean); + /** + * If specified, the match result will be inverted before checking. Defaults to false. + * + * Examples: + * + * * The regex ``\d{3}`` does not match the value ``1234``, so it will match when inverted. + * * The range [-10,0) will match the value -1, so it will not match when inverted. + */ + 'invert_match'?: (boolean); + /** + * If specified, header match will be performed based on the prefix of the header value. + * Note: empty prefix is not allowed, please use present_match instead. + * This field is deprecated. Please use :ref:`string_match `. + * + * Examples: + * + * * The prefix ``abcd`` matches the value ``abcdxyz``, but not for ``abcxyz``. + * @deprecated + */ + 'prefix_match'?: (string); + /** + * If specified, header match will be performed based on the suffix of the header value. + * Note: empty suffix is not allowed, please use present_match instead. + * This field is deprecated. Please use :ref:`string_match `. + * + * Examples: + * + * * The suffix ``abcd`` matches the value ``xyzabcd``, but not for ``xyzbcd``. + * @deprecated + */ + 'suffix_match'?: (string); + /** + * If specified, this regex string is a regular expression rule which implies the entire request + * header value must match the regex. The rule will not match if only a subsequence of the + * request header value matches the regex. + * This field is deprecated. Please use :ref:`string_match `. + * @deprecated + */ + 'safe_regex_match'?: (_envoy_type_matcher_v3_RegexMatcher | null); + /** + * If specified, header match will be performed based on whether the header value contains + * the given value or not. + * Note: empty contains match is not allowed, please use present_match instead. + * This field is deprecated. Please use :ref:`string_match `. + * + * Examples: + * + * * The value ``abcd`` matches the value ``xyzabcdpqr``, but not for ``xyzbcdpqr``. + * @deprecated + */ + 'contains_match'?: (string); + /** + * If specified, header match will be performed based on the string match of the header value. + */ + 'string_match'?: (_envoy_type_matcher_v3_StringMatcher | null); + /** + * If specified, for any header match rule, if the header match rule specified header + * does not exist, this header value will be treated as empty. Defaults to false. 
+ * + * Examples: + * + * * The header match rule specified header "header1" to range match of [0, 10], + * :ref:`invert_match ` + * is set to true and :ref:`treat_missing_header_as_empty ` + * is set to true; The "header1" header is not present. The match rule will + * treat the "header1" as an empty header. The empty header does not match the range, + * so it will match when inverted. + * * The header match rule specified header "header2" to range match of [0, 10], + * :ref:`invert_match ` + * is set to true and :ref:`treat_missing_header_as_empty ` + * is set to false; The "header2" header is not present and the header + * matcher rule for "header2" will be ignored so it will not match. + * * The header match rule specified header "header3" to a string regex match + * ``^$`` which means an empty string, and + * :ref:`treat_missing_header_as_empty ` + * is set to true; The "header3" header is not present. + * The match rule will treat the "header3" header as an empty header so it will match. + * * The header match rule specified header "header4" to a string regex match + * ``^$`` which means an empty string, and + * :ref:`treat_missing_header_as_empty ` + * is set to false; The "header4" header is not present. + * The match rule for "header4" will be ignored so it will not match. + */ + 'treat_missing_header_as_empty'?: (boolean); + /** + * Specifies how the header match will be performed to route the request. + */ + 'header_match_specifier'?: "exact_match"|"safe_regex_match"|"range_match"|"present_match"|"prefix_match"|"suffix_match"|"contains_match"|"string_match"; +} + +/** + * .. attention:: + * + * Internally, Envoy always uses the HTTP/2 ``:authority`` header to represent the HTTP/1 ``Host`` + * header. Thus, if attempting to match on ``Host``, match on ``:authority`` instead. + * + * .. attention:: + * + * To route on HTTP method, use the special HTTP/2 ``:method`` header. This works for both + * HTTP/1 and HTTP/2 as Envoy normalizes headers. E.g., + * + * .. code-block:: json + * + * { + * "name": ":method", + * "string_match": { + * "exact": "POST" + * } + * } + * + * .. attention:: + * In the absence of any header match specifier, match will default to :ref:`present_match + * `. i.e, a request that has the :ref:`name + * ` header will match, regardless of the header's + * value. + * + * [#next-major-version: HeaderMatcher should be refactored to use StringMatcher.] + * [#next-free-field: 15] + */ +export interface HeaderMatcher__Output { + /** + * Specifies the name of the header in the request. + */ + 'name': (string); + /** + * If specified, header match will be performed based on the value of the header. + * This field is deprecated. Please use :ref:`string_match `. + * @deprecated + */ + 'exact_match'?: (string); + /** + * If specified, header match will be performed based on range. + * The rule will match if the request header value is within this range. + * The entire request header value must represent an integer in base 10 notation: consisting of + * an optional plus or minus sign followed by a sequence of digits. The rule will not match if + * the header value does not represent an integer. Match will fail for empty values, floating + * point numbers or if only a subsequence of the header value is an integer. 
+ * + * Examples: + * + * * For range [-10,0), route will match for header value -1, but not for 0, ``somestring``, 10.9, + * ``-1somestring`` + */ + 'range_match'?: (_envoy_type_v3_Int64Range__Output | null); + /** + * If specified as true, header match will be performed based on whether the header is in the + * request. If specified as false, header match will be performed based on whether the header is absent. + */ + 'present_match'?: (boolean); + /** + * If specified, the match result will be inverted before checking. Defaults to false. + * + * Examples: + * + * * The regex ``\d{3}`` does not match the value ``1234``, so it will match when inverted. + * * The range [-10,0) will match the value -1, so it will not match when inverted. + */ + 'invert_match': (boolean); + /** + * If specified, header match will be performed based on the prefix of the header value. + * Note: empty prefix is not allowed, please use present_match instead. + * This field is deprecated. Please use :ref:`string_match `. + * + * Examples: + * + * * The prefix ``abcd`` matches the value ``abcdxyz``, but not for ``abcxyz``. + * @deprecated + */ + 'prefix_match'?: (string); + /** + * If specified, header match will be performed based on the suffix of the header value. + * Note: empty suffix is not allowed, please use present_match instead. + * This field is deprecated. Please use :ref:`string_match `. + * + * Examples: + * + * * The suffix ``abcd`` matches the value ``xyzabcd``, but not for ``xyzbcd``. + * @deprecated + */ + 'suffix_match'?: (string); + /** + * If specified, this regex string is a regular expression rule which implies the entire request + * header value must match the regex. The rule will not match if only a subsequence of the + * request header value matches the regex. + * This field is deprecated. Please use :ref:`string_match `. + * @deprecated + */ + 'safe_regex_match'?: (_envoy_type_matcher_v3_RegexMatcher__Output | null); + /** + * If specified, header match will be performed based on whether the header value contains + * the given value or not. + * Note: empty contains match is not allowed, please use present_match instead. + * This field is deprecated. Please use :ref:`string_match `. + * + * Examples: + * + * * The value ``abcd`` matches the value ``xyzabcdpqr``, but not for ``xyzbcdpqr``. + * @deprecated + */ + 'contains_match'?: (string); + /** + * If specified, header match will be performed based on the string match of the header value. + */ + 'string_match'?: (_envoy_type_matcher_v3_StringMatcher__Output | null); + /** + * If specified, for any header match rule, if the header match rule specified header + * does not exist, this header value will be treated as empty. Defaults to false. + * + * Examples: + * + * * The header match rule specified header "header1" to range match of [0, 10], + * :ref:`invert_match ` + * is set to true and :ref:`treat_missing_header_as_empty ` + * is set to true; The "header1" header is not present. The match rule will + * treat the "header1" as an empty header. The empty header does not match the range, + * so it will match when inverted. + * * The header match rule specified header "header2" to range match of [0, 10], + * :ref:`invert_match ` + * is set to true and :ref:`treat_missing_header_as_empty ` + * is set to false; The "header2" header is not present and the header + * matcher rule for "header2" will be ignored so it will not match. 
+ * * The header match rule specified header "header3" to a string regex match + * ``^$`` which means an empty string, and + * :ref:`treat_missing_header_as_empty ` + * is set to true; The "header3" header is not present. + * The match rule will treat the "header3" header as an empty header so it will match. + * * The header match rule specified header "header4" to a string regex match + * ``^$`` which means an empty string, and + * :ref:`treat_missing_header_as_empty ` + * is set to false; The "header4" header is not present. + * The match rule for "header4" will be ignored so it will not match. + */ + 'treat_missing_header_as_empty': (boolean); + /** + * Specifies how the header match will be performed to route the request. + */ + 'header_match_specifier': "exact_match"|"safe_regex_match"|"range_match"|"present_match"|"prefix_match"|"suffix_match"|"contains_match"|"string_match"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/route/v3/HedgePolicy.ts b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/HedgePolicy.ts new file mode 100644 index 000000000..302b6d284 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/HedgePolicy.ts @@ -0,0 +1,76 @@ +// Original file: deps/envoy-api/envoy/config/route/v3/route_components.proto + +import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; +import type { FractionalPercent as _envoy_type_v3_FractionalPercent, FractionalPercent__Output as _envoy_type_v3_FractionalPercent__Output } from '../../../../envoy/type/v3/FractionalPercent'; + +/** + * HTTP request hedging :ref:`architecture overview `. + */ +export interface HedgePolicy { + /** + * Specifies the number of initial requests that should be sent upstream. + * Must be at least 1. + * Defaults to 1. + * [#not-implemented-hide:] + */ + 'initial_requests'?: (_google_protobuf_UInt32Value | null); + /** + * Specifies a probability that an additional upstream request should be sent + * on top of what is specified by initial_requests. + * Defaults to 0. + * [#not-implemented-hide:] + */ + 'additional_request_chance'?: (_envoy_type_v3_FractionalPercent | null); + /** + * Indicates that a hedged request should be sent when the per-try timeout is hit. + * This means that a retry will be issued without resetting the original request, leaving multiple upstream requests in flight. + * The first request to complete successfully will be the one returned to the caller. + * + * * At any time, a successful response (i.e. not triggering any of the retry-on conditions) would be returned to the client. + * * Before per-try timeout, an error response (per retry-on conditions) would be retried immediately or returned ot the client + * if there are no more retries left. + * * After per-try timeout, an error response would be discarded, as a retry in the form of a hedged request is already in progress. + * + * Note: For this to have effect, you must have a :ref:`RetryPolicy ` that retries at least + * one error code and specifies a maximum number of retries. + * + * Defaults to false. + */ + 'hedge_on_per_try_timeout'?: (boolean); +} + +/** + * HTTP request hedging :ref:`architecture overview `. + */ +export interface HedgePolicy__Output { + /** + * Specifies the number of initial requests that should be sent upstream. + * Must be at least 1. + * Defaults to 1. 
+ * [#not-implemented-hide:] + */ + 'initial_requests': (_google_protobuf_UInt32Value__Output | null); + /** + * Specifies a probability that an additional upstream request should be sent + * on top of what is specified by initial_requests. + * Defaults to 0. + * [#not-implemented-hide:] + */ + 'additional_request_chance': (_envoy_type_v3_FractionalPercent__Output | null); + /** + * Indicates that a hedged request should be sent when the per-try timeout is hit. + * This means that a retry will be issued without resetting the original request, leaving multiple upstream requests in flight. + * The first request to complete successfully will be the one returned to the caller. + * + * * At any time, a successful response (i.e. not triggering any of the retry-on conditions) would be returned to the client. + * * Before per-try timeout, an error response (per retry-on conditions) would be retried immediately or returned ot the client + * if there are no more retries left. + * * After per-try timeout, an error response would be discarded, as a retry in the form of a hedged request is already in progress. + * + * Note: For this to have effect, you must have a :ref:`RetryPolicy ` that retries at least + * one error code and specifies a maximum number of retries. + * + * Defaults to false. + */ + 'hedge_on_per_try_timeout': (boolean); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/route/v3/InternalRedirectPolicy.ts b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/InternalRedirectPolicy.ts new file mode 100644 index 000000000..ab74df94e --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/InternalRedirectPolicy.ts @@ -0,0 +1,74 @@ +// Original file: deps/envoy-api/envoy/config/route/v3/route_components.proto + +import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; +import type { TypedExtensionConfig as _envoy_config_core_v3_TypedExtensionConfig, TypedExtensionConfig__Output as _envoy_config_core_v3_TypedExtensionConfig__Output } from '../../../../envoy/config/core/v3/TypedExtensionConfig'; + +/** + * HTTP Internal Redirect :ref:`architecture overview `. + */ +export interface InternalRedirectPolicy { + /** + * An internal redirect is not handled, unless the number of previous internal redirects that a + * downstream request has encountered is lower than this value. + * In the case where a downstream request is bounced among multiple routes by internal redirect, + * the first route that hits this threshold, or does not set :ref:`internal_redirect_policy + * ` + * will pass the redirect back to downstream. + * + * If not specified, at most one redirect will be followed. + */ + 'max_internal_redirects'?: (_google_protobuf_UInt32Value | null); + /** + * Defines what upstream response codes are allowed to trigger internal redirect. If unspecified, + * only 302 will be treated as internal redirect. + * Only 301, 302, 303, 307 and 308 are valid values. Any other codes will be ignored. + */ + 'redirect_response_codes'?: (number)[]; + /** + * Specifies a list of predicates that are queried when an upstream response is deemed + * to trigger an internal redirect by all other criteria. Any predicate in the list can reject + * the redirect, causing the response to be proxied to downstream. 
+ * [#extension-category: envoy.internal_redirect_predicates] + */ + 'predicates'?: (_envoy_config_core_v3_TypedExtensionConfig)[]; + /** + * Allow internal redirect to follow a target URI with a different scheme than the value of + * x-forwarded-proto. The default is false. + */ + 'allow_cross_scheme_redirect'?: (boolean); +} + +/** + * HTTP Internal Redirect :ref:`architecture overview `. + */ +export interface InternalRedirectPolicy__Output { + /** + * An internal redirect is not handled, unless the number of previous internal redirects that a + * downstream request has encountered is lower than this value. + * In the case where a downstream request is bounced among multiple routes by internal redirect, + * the first route that hits this threshold, or does not set :ref:`internal_redirect_policy + * ` + * will pass the redirect back to downstream. + * + * If not specified, at most one redirect will be followed. + */ + 'max_internal_redirects': (_google_protobuf_UInt32Value__Output | null); + /** + * Defines what upstream response codes are allowed to trigger internal redirect. If unspecified, + * only 302 will be treated as internal redirect. + * Only 301, 302, 303, 307 and 308 are valid values. Any other codes will be ignored. + */ + 'redirect_response_codes': (number)[]; + /** + * Specifies a list of predicates that are queried when an upstream response is deemed + * to trigger an internal redirect by all other criteria. Any predicate in the list can reject + * the redirect, causing the response to be proxied to downstream. + * [#extension-category: envoy.internal_redirect_predicates] + */ + 'predicates': (_envoy_config_core_v3_TypedExtensionConfig__Output)[]; + /** + * Allow internal redirect to follow a target URI with a different scheme than the value of + * x-forwarded-proto. The default is false. + */ + 'allow_cross_scheme_redirect': (boolean); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/route/v3/NonForwardingAction.ts b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/NonForwardingAction.ts new file mode 100644 index 000000000..e9c67d44f --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/NonForwardingAction.ts @@ -0,0 +1,14 @@ +// Original file: deps/envoy-api/envoy/config/route/v3/route_components.proto + + +/** + * [#not-implemented-hide:] + */ +export interface NonForwardingAction { +} + +/** + * [#not-implemented-hide:] + */ +export interface NonForwardingAction__Output { +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/route/v3/QueryParameterMatcher.ts b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/QueryParameterMatcher.ts new file mode 100644 index 000000000..b98b6329b --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/QueryParameterMatcher.ts @@ -0,0 +1,47 @@ +// Original file: deps/envoy-api/envoy/config/route/v3/route_components.proto + +import type { StringMatcher as _envoy_type_matcher_v3_StringMatcher, StringMatcher__Output as _envoy_type_matcher_v3_StringMatcher__Output } from '../../../../envoy/type/matcher/v3/StringMatcher'; + +/** + * Query parameter matching treats the query string of a request's :path header + * as an ampersand-separated list of keys and/or key=value elements. + * [#next-free-field: 7] + */ +export interface QueryParameterMatcher { + /** + * Specifies the name of a key that must be present in the requested + * ``path``'s query string. 
+ */ + 'name'?: (string); + /** + * Specifies whether a query parameter value should match against a string. + */ + 'string_match'?: (_envoy_type_matcher_v3_StringMatcher | null); + /** + * Specifies whether a query parameter should be present. + */ + 'present_match'?: (boolean); + 'query_parameter_match_specifier'?: "string_match"|"present_match"; +} + +/** + * Query parameter matching treats the query string of a request's :path header + * as an ampersand-separated list of keys and/or key=value elements. + * [#next-free-field: 7] + */ +export interface QueryParameterMatcher__Output { + /** + * Specifies the name of a key that must be present in the requested + * ``path``'s query string. + */ + 'name': (string); + /** + * Specifies whether a query parameter value should match against a string. + */ + 'string_match'?: (_envoy_type_matcher_v3_StringMatcher__Output | null); + /** + * Specifies whether a query parameter should be present. + */ + 'present_match'?: (boolean); + 'query_parameter_match_specifier': "string_match"|"present_match"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/route/v3/RateLimit.ts b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/RateLimit.ts new file mode 100644 index 000000000..28d17667f --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/RateLimit.ts @@ -0,0 +1,769 @@ +// Original file: deps/envoy-api/envoy/config/route/v3/route_components.proto + +import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; +import type { TypedExtensionConfig as _envoy_config_core_v3_TypedExtensionConfig, TypedExtensionConfig__Output as _envoy_config_core_v3_TypedExtensionConfig__Output } from '../../../../envoy/config/core/v3/TypedExtensionConfig'; +import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../../google/protobuf/BoolValue'; +import type { HeaderMatcher as _envoy_config_route_v3_HeaderMatcher, HeaderMatcher__Output as _envoy_config_route_v3_HeaderMatcher__Output } from '../../../../envoy/config/route/v3/HeaderMatcher'; +import type { MetadataKey as _envoy_type_metadata_v3_MetadataKey, MetadataKey__Output as _envoy_type_metadata_v3_MetadataKey__Output } from '../../../../envoy/type/metadata/v3/MetadataKey'; +import type { QueryParameterMatcher as _envoy_config_route_v3_QueryParameterMatcher, QueryParameterMatcher__Output as _envoy_config_route_v3_QueryParameterMatcher__Output } from '../../../../envoy/config/route/v3/QueryParameterMatcher'; + +/** + * [#next-free-field: 12] + */ +export interface _envoy_config_route_v3_RateLimit_Action { + /** + * Rate limit on source cluster. + */ + 'source_cluster'?: (_envoy_config_route_v3_RateLimit_Action_SourceCluster | null); + /** + * Rate limit on destination cluster. + */ + 'destination_cluster'?: (_envoy_config_route_v3_RateLimit_Action_DestinationCluster | null); + /** + * Rate limit on request headers. + */ + 'request_headers'?: (_envoy_config_route_v3_RateLimit_Action_RequestHeaders | null); + /** + * Rate limit on remote address. + */ + 'remote_address'?: (_envoy_config_route_v3_RateLimit_Action_RemoteAddress | null); + /** + * Rate limit on a generic key. + */ + 'generic_key'?: (_envoy_config_route_v3_RateLimit_Action_GenericKey | null); + /** + * Rate limit on the existence of request headers. 
+ */ + 'header_value_match'?: (_envoy_config_route_v3_RateLimit_Action_HeaderValueMatch | null); + /** + * Rate limit on dynamic metadata. + * + * .. attention:: + * This field has been deprecated in favor of the :ref:`metadata ` field + * @deprecated + */ + 'dynamic_metadata'?: (_envoy_config_route_v3_RateLimit_Action_DynamicMetaData | null); + /** + * Rate limit on metadata. + */ + 'metadata'?: (_envoy_config_route_v3_RateLimit_Action_MetaData | null); + /** + * Rate limit descriptor extension. See the rate limit descriptor extensions documentation. + * + * :ref:`HTTP matching input functions ` are + * permitted as descriptor extensions. The input functions are only + * looked up if there is no rate limit descriptor extension matching + * the type URL. + * + * [#extension-category: envoy.rate_limit_descriptors] + */ + 'extension'?: (_envoy_config_core_v3_TypedExtensionConfig | null); + /** + * Rate limit on masked remote address. + */ + 'masked_remote_address'?: (_envoy_config_route_v3_RateLimit_Action_MaskedRemoteAddress | null); + /** + * Rate limit on the existence of query parameters. + */ + 'query_parameter_value_match'?: (_envoy_config_route_v3_RateLimit_Action_QueryParameterValueMatch | null); + 'action_specifier'?: "source_cluster"|"destination_cluster"|"request_headers"|"remote_address"|"generic_key"|"header_value_match"|"dynamic_metadata"|"metadata"|"extension"|"masked_remote_address"|"query_parameter_value_match"; +} + +/** + * [#next-free-field: 12] + */ +export interface _envoy_config_route_v3_RateLimit_Action__Output { + /** + * Rate limit on source cluster. + */ + 'source_cluster'?: (_envoy_config_route_v3_RateLimit_Action_SourceCluster__Output | null); + /** + * Rate limit on destination cluster. + */ + 'destination_cluster'?: (_envoy_config_route_v3_RateLimit_Action_DestinationCluster__Output | null); + /** + * Rate limit on request headers. + */ + 'request_headers'?: (_envoy_config_route_v3_RateLimit_Action_RequestHeaders__Output | null); + /** + * Rate limit on remote address. + */ + 'remote_address'?: (_envoy_config_route_v3_RateLimit_Action_RemoteAddress__Output | null); + /** + * Rate limit on a generic key. + */ + 'generic_key'?: (_envoy_config_route_v3_RateLimit_Action_GenericKey__Output | null); + /** + * Rate limit on the existence of request headers. + */ + 'header_value_match'?: (_envoy_config_route_v3_RateLimit_Action_HeaderValueMatch__Output | null); + /** + * Rate limit on dynamic metadata. + * + * .. attention:: + * This field has been deprecated in favor of the :ref:`metadata ` field + * @deprecated + */ + 'dynamic_metadata'?: (_envoy_config_route_v3_RateLimit_Action_DynamicMetaData__Output | null); + /** + * Rate limit on metadata. + */ + 'metadata'?: (_envoy_config_route_v3_RateLimit_Action_MetaData__Output | null); + /** + * Rate limit descriptor extension. See the rate limit descriptor extensions documentation. + * + * :ref:`HTTP matching input functions ` are + * permitted as descriptor extensions. The input functions are only + * looked up if there is no rate limit descriptor extension matching + * the type URL. + * + * [#extension-category: envoy.rate_limit_descriptors] + */ + 'extension'?: (_envoy_config_core_v3_TypedExtensionConfig__Output | null); + /** + * Rate limit on masked remote address. + */ + 'masked_remote_address'?: (_envoy_config_route_v3_RateLimit_Action_MaskedRemoteAddress__Output | null); + /** + * Rate limit on the existence of query parameters. 
+ */ + 'query_parameter_value_match'?: (_envoy_config_route_v3_RateLimit_Action_QueryParameterValueMatch__Output | null); + 'action_specifier': "source_cluster"|"destination_cluster"|"request_headers"|"remote_address"|"generic_key"|"header_value_match"|"dynamic_metadata"|"metadata"|"extension"|"masked_remote_address"|"query_parameter_value_match"; +} + +/** + * The following descriptor entry is appended to the descriptor: + * + * .. code-block:: cpp + * + * ("destination_cluster", "") + * + * Once a request matches against a route table rule, a routed cluster is determined by one of + * the following :ref:`route table configuration ` + * settings: + * + * * :ref:`cluster ` indicates the upstream cluster + * to route to. + * * :ref:`weighted_clusters ` + * chooses a cluster randomly from a set of clusters with attributed weight. + * * :ref:`cluster_header ` indicates which + * header in the request contains the target cluster. + */ +export interface _envoy_config_route_v3_RateLimit_Action_DestinationCluster { +} + +/** + * The following descriptor entry is appended to the descriptor: + * + * .. code-block:: cpp + * + * ("destination_cluster", "") + * + * Once a request matches against a route table rule, a routed cluster is determined by one of + * the following :ref:`route table configuration ` + * settings: + * + * * :ref:`cluster ` indicates the upstream cluster + * to route to. + * * :ref:`weighted_clusters ` + * chooses a cluster randomly from a set of clusters with attributed weight. + * * :ref:`cluster_header ` indicates which + * header in the request contains the target cluster. + */ +export interface _envoy_config_route_v3_RateLimit_Action_DestinationCluster__Output { +} + +/** + * The following descriptor entry is appended when the + * :ref:`dynamic metadata ` contains a key value: + * + * .. code-block:: cpp + * + * ("", "") + * + * .. attention:: + * This action has been deprecated in favor of the :ref:`metadata ` action + */ +export interface _envoy_config_route_v3_RateLimit_Action_DynamicMetaData { + /** + * The key to use in the descriptor entry. + */ + 'descriptor_key'?: (string); + /** + * Metadata struct that defines the key and path to retrieve the string value. A match will + * only happen if the value in the dynamic metadata is of type string. + */ + 'metadata_key'?: (_envoy_type_metadata_v3_MetadataKey | null); + /** + * An optional value to use if ``metadata_key`` is empty. If not set and + * no value is present under the metadata_key then no descriptor is generated. + */ + 'default_value'?: (string); +} + +/** + * The following descriptor entry is appended when the + * :ref:`dynamic metadata ` contains a key value: + * + * .. code-block:: cpp + * + * ("", "") + * + * .. attention:: + * This action has been deprecated in favor of the :ref:`metadata ` action + */ +export interface _envoy_config_route_v3_RateLimit_Action_DynamicMetaData__Output { + /** + * The key to use in the descriptor entry. + */ + 'descriptor_key': (string); + /** + * Metadata struct that defines the key and path to retrieve the string value. A match will + * only happen if the value in the dynamic metadata is of type string. + */ + 'metadata_key': (_envoy_type_metadata_v3_MetadataKey__Output | null); + /** + * An optional value to use if ``metadata_key`` is empty. If not set and + * no value is present under the metadata_key then no descriptor is generated. + */ + 'default_value': (string); +} + +/** + * Fetches the override from the dynamic metadata. 
+ */ +export interface _envoy_config_route_v3_RateLimit_Override_DynamicMetadata { + /** + * Metadata struct that defines the key and path to retrieve the struct value. + * The value must be a struct containing an integer "requests_per_unit" property + * and a "unit" property with a value parseable to :ref:`RateLimitUnit + * enum ` + */ + 'metadata_key'?: (_envoy_type_metadata_v3_MetadataKey | null); +} + +/** + * Fetches the override from the dynamic metadata. + */ +export interface _envoy_config_route_v3_RateLimit_Override_DynamicMetadata__Output { + /** + * Metadata struct that defines the key and path to retrieve the struct value. + * The value must be a struct containing an integer "requests_per_unit" property + * and a "unit" property with a value parseable to :ref:`RateLimitUnit + * enum ` + */ + 'metadata_key': (_envoy_type_metadata_v3_MetadataKey__Output | null); +} + +/** + * The following descriptor entry is appended to the descriptor: + * + * .. code-block:: cpp + * + * ("generic_key", "") + */ +export interface _envoy_config_route_v3_RateLimit_Action_GenericKey { + /** + * The value to use in the descriptor entry. + */ + 'descriptor_value'?: (string); + /** + * An optional key to use in the descriptor entry. If not set it defaults + * to 'generic_key' as the descriptor key. + */ + 'descriptor_key'?: (string); +} + +/** + * The following descriptor entry is appended to the descriptor: + * + * .. code-block:: cpp + * + * ("generic_key", "") + */ +export interface _envoy_config_route_v3_RateLimit_Action_GenericKey__Output { + /** + * The value to use in the descriptor entry. + */ + 'descriptor_value': (string); + /** + * An optional key to use in the descriptor entry. If not set it defaults + * to 'generic_key' as the descriptor key. + */ + 'descriptor_key': (string); +} + +/** + * The following descriptor entry is appended to the descriptor: + * + * .. code-block:: cpp + * + * ("header_match", "") + */ +export interface _envoy_config_route_v3_RateLimit_Action_HeaderValueMatch { + /** + * The key to use in the descriptor entry. Defaults to ``header_match``. + */ + 'descriptor_key'?: (string); + /** + * The value to use in the descriptor entry. + */ + 'descriptor_value'?: (string); + /** + * If set to true, the action will append a descriptor entry when the + * request matches the headers. If set to false, the action will append a + * descriptor entry when the request does not match the headers. The + * default value is true. + */ + 'expect_match'?: (_google_protobuf_BoolValue | null); + /** + * Specifies a set of headers that the rate limit action should match + * on. The action will check the request’s headers against all the + * specified headers in the config. A match will happen if all the + * headers in the config are present in the request with the same values + * (or based on presence if the value field is not in the config). + */ + 'headers'?: (_envoy_config_route_v3_HeaderMatcher)[]; +} + +/** + * The following descriptor entry is appended to the descriptor: + * + * .. code-block:: cpp + * + * ("header_match", "") + */ +export interface _envoy_config_route_v3_RateLimit_Action_HeaderValueMatch__Output { + /** + * The key to use in the descriptor entry. Defaults to ``header_match``. + */ + 'descriptor_key': (string); + /** + * The value to use in the descriptor entry. + */ + 'descriptor_value': (string); + /** + * If set to true, the action will append a descriptor entry when the + * request matches the headers. 
If set to false, the action will append a + * descriptor entry when the request does not match the headers. The + * default value is true. + */ + 'expect_match': (_google_protobuf_BoolValue__Output | null); + /** + * Specifies a set of headers that the rate limit action should match + * on. The action will check the request’s headers against all the + * specified headers in the config. A match will happen if all the + * headers in the config are present in the request with the same values + * (or based on presence if the value field is not in the config). + */ + 'headers': (_envoy_config_route_v3_HeaderMatcher__Output)[]; +} + +/** + * The following descriptor entry is appended to the descriptor and is populated using the + * masked address from :ref:`x-forwarded-for `: + * + * .. code-block:: cpp + * + * ("masked_remote_address", "") + */ +export interface _envoy_config_route_v3_RateLimit_Action_MaskedRemoteAddress { + /** + * Length of prefix mask len for IPv4 (e.g. 0, 32). + * Defaults to 32 when unset. + * For example, trusted address from x-forwarded-for is ``192.168.1.1``, + * the descriptor entry is ("masked_remote_address", "192.168.1.1/32"); + * if mask len is 24, the descriptor entry is ("masked_remote_address", "192.168.1.0/24"). + */ + 'v4_prefix_mask_len'?: (_google_protobuf_UInt32Value | null); + /** + * Length of prefix mask len for IPv6 (e.g. 0, 128). + * Defaults to 128 when unset. + * For example, trusted address from x-forwarded-for is ``2001:abcd:ef01:2345:6789:abcd:ef01:234``, + * the descriptor entry is ("masked_remote_address", "2001:abcd:ef01:2345:6789:abcd:ef01:234/128"); + * if mask len is 64, the descriptor entry is ("masked_remote_address", "2001:abcd:ef01:2345::/64"). + */ + 'v6_prefix_mask_len'?: (_google_protobuf_UInt32Value | null); +} + +/** + * The following descriptor entry is appended to the descriptor and is populated using the + * masked address from :ref:`x-forwarded-for `: + * + * .. code-block:: cpp + * + * ("masked_remote_address", "") + */ +export interface _envoy_config_route_v3_RateLimit_Action_MaskedRemoteAddress__Output { + /** + * Length of prefix mask len for IPv4 (e.g. 0, 32). + * Defaults to 32 when unset. + * For example, trusted address from x-forwarded-for is ``192.168.1.1``, + * the descriptor entry is ("masked_remote_address", "192.168.1.1/32"); + * if mask len is 24, the descriptor entry is ("masked_remote_address", "192.168.1.0/24"). + */ + 'v4_prefix_mask_len': (_google_protobuf_UInt32Value__Output | null); + /** + * Length of prefix mask len for IPv6 (e.g. 0, 128). + * Defaults to 128 when unset. + * For example, trusted address from x-forwarded-for is ``2001:abcd:ef01:2345:6789:abcd:ef01:234``, + * the descriptor entry is ("masked_remote_address", "2001:abcd:ef01:2345:6789:abcd:ef01:234/128"); + * if mask len is 64, the descriptor entry is ("masked_remote_address", "2001:abcd:ef01:2345::/64"). + */ + 'v6_prefix_mask_len': (_google_protobuf_UInt32Value__Output | null); +} + +/** + * The following descriptor entry is appended when the metadata contains a key value: + * + * .. code-block:: cpp + * + * ("", "") + * [#next-free-field: 6] + */ +export interface _envoy_config_route_v3_RateLimit_Action_MetaData { + /** + * The key to use in the descriptor entry. + */ + 'descriptor_key'?: (string); + /** + * Metadata struct that defines the key and path to retrieve the string value. A match will + * only happen if the value in the metadata is of type string. 
+ */ + 'metadata_key'?: (_envoy_type_metadata_v3_MetadataKey | null); + /** + * An optional value to use if ``metadata_key`` is empty. If not set and + * no value is present under the metadata_key then ``skip_if_absent`` is followed to + * skip calling the rate limiting service or skip the descriptor. + */ + 'default_value'?: (string); + /** + * Source of metadata + */ + 'source'?: (_envoy_config_route_v3_RateLimit_Action_MetaData_Source); + /** + * If set to true, Envoy skips the descriptor while calling rate limiting service + * when ``metadata_key`` is empty and ``default_value`` is not set. By default it skips calling the + * rate limiting service in that case. + */ + 'skip_if_absent'?: (boolean); +} + +/** + * The following descriptor entry is appended when the metadata contains a key value: + * + * .. code-block:: cpp + * + * ("", "") + * [#next-free-field: 6] + */ +export interface _envoy_config_route_v3_RateLimit_Action_MetaData__Output { + /** + * The key to use in the descriptor entry. + */ + 'descriptor_key': (string); + /** + * Metadata struct that defines the key and path to retrieve the string value. A match will + * only happen if the value in the metadata is of type string. + */ + 'metadata_key': (_envoy_type_metadata_v3_MetadataKey__Output | null); + /** + * An optional value to use if ``metadata_key`` is empty. If not set and + * no value is present under the metadata_key then ``skip_if_absent`` is followed to + * skip calling the rate limiting service or skip the descriptor. + */ + 'default_value': (string); + /** + * Source of metadata + */ + 'source': (_envoy_config_route_v3_RateLimit_Action_MetaData_Source__Output); + /** + * If set to true, Envoy skips the descriptor while calling rate limiting service + * when ``metadata_key`` is empty and ``default_value`` is not set. By default it skips calling the + * rate limiting service in that case. + */ + 'skip_if_absent': (boolean); +} + +export interface _envoy_config_route_v3_RateLimit_Override { + /** + * Limit override from dynamic metadata. + */ + 'dynamic_metadata'?: (_envoy_config_route_v3_RateLimit_Override_DynamicMetadata | null); + 'override_specifier'?: "dynamic_metadata"; +} + +export interface _envoy_config_route_v3_RateLimit_Override__Output { + /** + * Limit override from dynamic metadata. + */ + 'dynamic_metadata'?: (_envoy_config_route_v3_RateLimit_Override_DynamicMetadata__Output | null); + 'override_specifier': "dynamic_metadata"; +} + +/** + * The following descriptor entry is appended to the descriptor: + * + * .. code-block:: cpp + * + * ("query_match", "") + */ +export interface _envoy_config_route_v3_RateLimit_Action_QueryParameterValueMatch { + /** + * The key to use in the descriptor entry. Defaults to ``query_match``. + */ + 'descriptor_key'?: (string); + /** + * The value to use in the descriptor entry. + */ + 'descriptor_value'?: (string); + /** + * If set to true, the action will append a descriptor entry when the + * request matches the headers. If set to false, the action will append a + * descriptor entry when the request does not match the headers. The + * default value is true. + */ + 'expect_match'?: (_google_protobuf_BoolValue | null); + /** + * Specifies a set of query parameters that the rate limit action should match + * on. The action will check the request’s query parameters against all the + * specified query parameters in the config. 
A match will happen if all the + * query parameters in the config are present in the request with the same values + * (or based on presence if the value field is not in the config). + */ + 'query_parameters'?: (_envoy_config_route_v3_QueryParameterMatcher)[]; +} + +/** + * The following descriptor entry is appended to the descriptor: + * + * .. code-block:: cpp + * + * ("query_match", "") + */ +export interface _envoy_config_route_v3_RateLimit_Action_QueryParameterValueMatch__Output { + /** + * The key to use in the descriptor entry. Defaults to ``query_match``. + */ + 'descriptor_key': (string); + /** + * The value to use in the descriptor entry. + */ + 'descriptor_value': (string); + /** + * If set to true, the action will append a descriptor entry when the + * request matches the headers. If set to false, the action will append a + * descriptor entry when the request does not match the headers. The + * default value is true. + */ + 'expect_match': (_google_protobuf_BoolValue__Output | null); + /** + * Specifies a set of query parameters that the rate limit action should match + * on. The action will check the request’s query parameters against all the + * specified query parameters in the config. A match will happen if all the + * query parameters in the config are present in the request with the same values + * (or based on presence if the value field is not in the config). + */ + 'query_parameters': (_envoy_config_route_v3_QueryParameterMatcher__Output)[]; +} + +/** + * The following descriptor entry is appended to the descriptor and is populated using the + * trusted address from :ref:`x-forwarded-for `: + * + * .. code-block:: cpp + * + * ("remote_address", "") + */ +export interface _envoy_config_route_v3_RateLimit_Action_RemoteAddress { +} + +/** + * The following descriptor entry is appended to the descriptor and is populated using the + * trusted address from :ref:`x-forwarded-for `: + * + * .. code-block:: cpp + * + * ("remote_address", "") + */ +export interface _envoy_config_route_v3_RateLimit_Action_RemoteAddress__Output { +} + +/** + * The following descriptor entry is appended when a header contains a key that matches the + * ``header_name``: + * + * .. code-block:: cpp + * + * ("", "") + */ +export interface _envoy_config_route_v3_RateLimit_Action_RequestHeaders { + /** + * The header name to be queried from the request headers. The header’s + * value is used to populate the value of the descriptor entry for the + * descriptor_key. + */ + 'header_name'?: (string); + /** + * The key to use in the descriptor entry. + */ + 'descriptor_key'?: (string); + /** + * If set to true, Envoy skips the descriptor while calling rate limiting service + * when header is not present in the request. By default it skips calling the + * rate limiting service if this header is not present in the request. + */ + 'skip_if_absent'?: (boolean); +} + +/** + * The following descriptor entry is appended when a header contains a key that matches the + * ``header_name``: + * + * .. code-block:: cpp + * + * ("", "") + */ +export interface _envoy_config_route_v3_RateLimit_Action_RequestHeaders__Output { + /** + * The header name to be queried from the request headers. The header’s + * value is used to populate the value of the descriptor entry for the + * descriptor_key. + */ + 'header_name': (string); + /** + * The key to use in the descriptor entry. 
+ */ + 'descriptor_key': (string); + /** + * If set to true, Envoy skips the descriptor while calling rate limiting service + * when header is not present in the request. By default it skips calling the + * rate limiting service if this header is not present in the request. + */ + 'skip_if_absent': (boolean); +} + +// Original file: deps/envoy-api/envoy/config/route/v3/route_components.proto + +export const _envoy_config_route_v3_RateLimit_Action_MetaData_Source = { + /** + * Query :ref:`dynamic metadata ` + */ + DYNAMIC: 'DYNAMIC', + /** + * Query :ref:`route entry metadata ` + */ + ROUTE_ENTRY: 'ROUTE_ENTRY', +} as const; + +export type _envoy_config_route_v3_RateLimit_Action_MetaData_Source = + /** + * Query :ref:`dynamic metadata ` + */ + | 'DYNAMIC' + | 0 + /** + * Query :ref:`route entry metadata ` + */ + | 'ROUTE_ENTRY' + | 1 + +export type _envoy_config_route_v3_RateLimit_Action_MetaData_Source__Output = typeof _envoy_config_route_v3_RateLimit_Action_MetaData_Source[keyof typeof _envoy_config_route_v3_RateLimit_Action_MetaData_Source] + +/** + * The following descriptor entry is appended to the descriptor: + * + * .. code-block:: cpp + * + * ("source_cluster", "") + * + * is derived from the :option:`--service-cluster` option. + */ +export interface _envoy_config_route_v3_RateLimit_Action_SourceCluster { +} + +/** + * The following descriptor entry is appended to the descriptor: + * + * .. code-block:: cpp + * + * ("source_cluster", "") + * + * is derived from the :option:`--service-cluster` option. + */ +export interface _envoy_config_route_v3_RateLimit_Action_SourceCluster__Output { +} + +/** + * Global rate limiting :ref:`architecture overview `. + * Also applies to Local rate limiting :ref:`using descriptors `. + */ +export interface RateLimit { + /** + * Refers to the stage set in the filter. The rate limit configuration only + * applies to filters with the same stage number. The default stage number is + * 0. + * + * .. note:: + * + * The filter supports a range of 0 - 10 inclusively for stage numbers. + */ + 'stage'?: (_google_protobuf_UInt32Value | null); + /** + * The key to be set in runtime to disable this rate limit configuration. + */ + 'disable_key'?: (string); + /** + * A list of actions that are to be applied for this rate limit configuration. + * Order matters as the actions are processed sequentially and the descriptor + * is composed by appending descriptor entries in that sequence. If an action + * cannot append a descriptor entry, no descriptor is generated for the + * configuration. See :ref:`composing actions + * ` for additional documentation. + */ + 'actions'?: (_envoy_config_route_v3_RateLimit_Action)[]; + /** + * An optional limit override to be appended to the descriptor produced by this + * rate limit configuration. If the override value is invalid or cannot be resolved + * from metadata, no override is provided. See :ref:`rate limit override + * ` for more information. + */ + 'limit'?: (_envoy_config_route_v3_RateLimit_Override | null); +} + +/** + * Global rate limiting :ref:`architecture overview `. + * Also applies to Local rate limiting :ref:`using descriptors `. + */ +export interface RateLimit__Output { + /** + * Refers to the stage set in the filter. The rate limit configuration only + * applies to filters with the same stage number. The default stage number is + * 0. + * + * .. note:: + * + * The filter supports a range of 0 - 10 inclusively for stage numbers. 
+ */ + 'stage': (_google_protobuf_UInt32Value__Output | null); + /** + * The key to be set in runtime to disable this rate limit configuration. + */ + 'disable_key': (string); + /** + * A list of actions that are to be applied for this rate limit configuration. + * Order matters as the actions are processed sequentially and the descriptor + * is composed by appending descriptor entries in that sequence. If an action + * cannot append a descriptor entry, no descriptor is generated for the + * configuration. See :ref:`composing actions + * ` for additional documentation. + */ + 'actions': (_envoy_config_route_v3_RateLimit_Action__Output)[]; + /** + * An optional limit override to be appended to the descriptor produced by this + * rate limit configuration. If the override value is invalid or cannot be resolved + * from metadata, no override is provided. See :ref:`rate limit override + * ` for more information. + */ + 'limit': (_envoy_config_route_v3_RateLimit_Override__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/route/v3/RedirectAction.ts b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/RedirectAction.ts new file mode 100644 index 000000000..070470af3 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/RedirectAction.ts @@ -0,0 +1,251 @@ +// Original file: deps/envoy-api/envoy/config/route/v3/route_components.proto + +import type { RegexMatchAndSubstitute as _envoy_type_matcher_v3_RegexMatchAndSubstitute, RegexMatchAndSubstitute__Output as _envoy_type_matcher_v3_RegexMatchAndSubstitute__Output } from '../../../../envoy/type/matcher/v3/RegexMatchAndSubstitute'; + +// Original file: deps/envoy-api/envoy/config/route/v3/route_components.proto + +export const _envoy_config_route_v3_RedirectAction_RedirectResponseCode = { + /** + * Moved Permanently HTTP Status Code - 301. + */ + MOVED_PERMANENTLY: 'MOVED_PERMANENTLY', + /** + * Found HTTP Status Code - 302. + */ + FOUND: 'FOUND', + /** + * See Other HTTP Status Code - 303. + */ + SEE_OTHER: 'SEE_OTHER', + /** + * Temporary Redirect HTTP Status Code - 307. + */ + TEMPORARY_REDIRECT: 'TEMPORARY_REDIRECT', + /** + * Permanent Redirect HTTP Status Code - 308. + */ + PERMANENT_REDIRECT: 'PERMANENT_REDIRECT', +} as const; + +export type _envoy_config_route_v3_RedirectAction_RedirectResponseCode = + /** + * Moved Permanently HTTP Status Code - 301. + */ + | 'MOVED_PERMANENTLY' + | 0 + /** + * Found HTTP Status Code - 302. + */ + | 'FOUND' + | 1 + /** + * See Other HTTP Status Code - 303. + */ + | 'SEE_OTHER' + | 2 + /** + * Temporary Redirect HTTP Status Code - 307. + */ + | 'TEMPORARY_REDIRECT' + | 3 + /** + * Permanent Redirect HTTP Status Code - 308. + */ + | 'PERMANENT_REDIRECT' + | 4 + +export type _envoy_config_route_v3_RedirectAction_RedirectResponseCode__Output = typeof _envoy_config_route_v3_RedirectAction_RedirectResponseCode[keyof typeof _envoy_config_route_v3_RedirectAction_RedirectResponseCode] + +/** + * [#next-free-field: 10] + */ +export interface RedirectAction { + /** + * The host portion of the URL will be swapped with this value. + */ + 'host_redirect'?: (string); + /** + * The path portion of the URL will be swapped with this value. + * Please note that query string in path_redirect will override the + * request's query string and will not be stripped. 
+ * + * For example, let's say we have the following routes: + * + * - match: { path: "/old-path-1" } + * redirect: { path_redirect: "/new-path-1" } + * - match: { path: "/old-path-2" } + * redirect: { path_redirect: "/new-path-2", strip-query: "true" } + * - match: { path: "/old-path-3" } + * redirect: { path_redirect: "/new-path-3?foo=1", strip_query: "true" } + * + * 1. if request uri is "/old-path-1?bar=1", users will be redirected to "/new-path-1?bar=1" + * 2. if request uri is "/old-path-2?bar=1", users will be redirected to "/new-path-2" + * 3. if request uri is "/old-path-3?bar=1", users will be redirected to "/new-path-3?foo=1" + */ + 'path_redirect'?: (string); + /** + * The HTTP status code to use in the redirect response. The default response + * code is MOVED_PERMANENTLY (301). + */ + 'response_code'?: (_envoy_config_route_v3_RedirectAction_RedirectResponseCode); + /** + * The scheme portion of the URL will be swapped with "https". + */ + 'https_redirect'?: (boolean); + /** + * Indicates that during redirection, the matched prefix (or path) + * should be swapped with this value. This option allows redirect URLs be dynamically created + * based on the request. + * + * .. attention:: + * + * Pay attention to the use of trailing slashes as mentioned in + * :ref:`RouteAction's prefix_rewrite `. + */ + 'prefix_rewrite'?: (string); + /** + * Indicates that during redirection, the query portion of the URL will + * be removed. Default value is false. + */ + 'strip_query'?: (boolean); + /** + * The scheme portion of the URL will be swapped with this value. + */ + 'scheme_redirect'?: (string); + /** + * The port value of the URL will be swapped with this value. + */ + 'port_redirect'?: (number); + /** + * Indicates that during redirect, portions of the path that match the + * pattern should be rewritten, even allowing the substitution of capture + * groups from the pattern into the new path as specified by the rewrite + * substitution string. This is useful to allow application paths to be + * rewritten in a way that is aware of segments with variable content like + * identifiers. + * + * Examples using Google's `RE2 `_ engine: + * + * * The path pattern ``^/service/([^/]+)(/.*)$`` paired with a substitution + * string of ``\2/instance/\1`` would transform ``/service/foo/v1/api`` + * into ``/v1/api/instance/foo``. + * + * * The pattern ``one`` paired with a substitution string of ``two`` would + * transform ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/two/zzz``. + * + * * The pattern ``^(.*?)one(.*)$`` paired with a substitution string of + * ``\1two\2`` would replace only the first occurrence of ``one``, + * transforming path ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/one/zzz``. + * + * * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/`` + * would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to + * ``/aaa/yyy/bbb``. + */ + 'regex_rewrite'?: (_envoy_type_matcher_v3_RegexMatchAndSubstitute | null); + /** + * When the scheme redirection take place, the following rules apply: + * 1. If the source URI scheme is ``http`` and the port is explicitly + * set to ``:80``, the port will be removed after the redirection + * 2. 
If the source URI scheme is ``https`` and the port is explicitly + * set to ``:443``, the port will be removed after the redirection + */ + 'scheme_rewrite_specifier'?: "https_redirect"|"scheme_redirect"; + 'path_rewrite_specifier'?: "path_redirect"|"prefix_rewrite"|"regex_rewrite"; +} + +/** + * [#next-free-field: 10] + */ +export interface RedirectAction__Output { + /** + * The host portion of the URL will be swapped with this value. + */ + 'host_redirect': (string); + /** + * The path portion of the URL will be swapped with this value. + * Please note that query string in path_redirect will override the + * request's query string and will not be stripped. + * + * For example, let's say we have the following routes: + * + * - match: { path: "/old-path-1" } + * redirect: { path_redirect: "/new-path-1" } + * - match: { path: "/old-path-2" } + * redirect: { path_redirect: "/new-path-2", strip-query: "true" } + * - match: { path: "/old-path-3" } + * redirect: { path_redirect: "/new-path-3?foo=1", strip_query: "true" } + * + * 1. if request uri is "/old-path-1?bar=1", users will be redirected to "/new-path-1?bar=1" + * 2. if request uri is "/old-path-2?bar=1", users will be redirected to "/new-path-2" + * 3. if request uri is "/old-path-3?bar=1", users will be redirected to "/new-path-3?foo=1" + */ + 'path_redirect'?: (string); + /** + * The HTTP status code to use in the redirect response. The default response + * code is MOVED_PERMANENTLY (301). + */ + 'response_code': (_envoy_config_route_v3_RedirectAction_RedirectResponseCode__Output); + /** + * The scheme portion of the URL will be swapped with "https". + */ + 'https_redirect'?: (boolean); + /** + * Indicates that during redirection, the matched prefix (or path) + * should be swapped with this value. This option allows redirect URLs be dynamically created + * based on the request. + * + * .. attention:: + * + * Pay attention to the use of trailing slashes as mentioned in + * :ref:`RouteAction's prefix_rewrite `. + */ + 'prefix_rewrite'?: (string); + /** + * Indicates that during redirection, the query portion of the URL will + * be removed. Default value is false. + */ + 'strip_query': (boolean); + /** + * The scheme portion of the URL will be swapped with this value. + */ + 'scheme_redirect'?: (string); + /** + * The port value of the URL will be swapped with this value. + */ + 'port_redirect': (number); + /** + * Indicates that during redirect, portions of the path that match the + * pattern should be rewritten, even allowing the substitution of capture + * groups from the pattern into the new path as specified by the rewrite + * substitution string. This is useful to allow application paths to be + * rewritten in a way that is aware of segments with variable content like + * identifiers. + * + * Examples using Google's `RE2 `_ engine: + * + * * The path pattern ``^/service/([^/]+)(/.*)$`` paired with a substitution + * string of ``\2/instance/\1`` would transform ``/service/foo/v1/api`` + * into ``/v1/api/instance/foo``. + * + * * The pattern ``one`` paired with a substitution string of ``two`` would + * transform ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/two/zzz``. + * + * * The pattern ``^(.*?)one(.*)$`` paired with a substitution string of + * ``\1two\2`` would replace only the first occurrence of ``one``, + * transforming path ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/one/zzz``. 
+ * + * * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/`` + * would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to + * ``/aaa/yyy/bbb``. + */ + 'regex_rewrite'?: (_envoy_type_matcher_v3_RegexMatchAndSubstitute__Output | null); + /** + * When the scheme redirection take place, the following rules apply: + * 1. If the source URI scheme is ``http`` and the port is explicitly + * set to ``:80``, the port will be removed after the redirection + * 2. If the source URI scheme is ``https`` and the port is explicitly + * set to ``:443``, the port will be removed after the redirection + */ + 'scheme_rewrite_specifier': "https_redirect"|"scheme_redirect"; + 'path_rewrite_specifier': "path_redirect"|"prefix_rewrite"|"regex_rewrite"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/route/v3/RetryPolicy.ts b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/RetryPolicy.ts new file mode 100644 index 000000000..773943b7f --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/RetryPolicy.ts @@ -0,0 +1,469 @@ +// Original file: deps/envoy-api/envoy/config/route/v3/route_components.proto + +import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; +import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../../google/protobuf/Duration'; +import type { HeaderMatcher as _envoy_config_route_v3_HeaderMatcher, HeaderMatcher__Output as _envoy_config_route_v3_HeaderMatcher__Output } from '../../../../envoy/config/route/v3/HeaderMatcher'; +import type { TypedExtensionConfig as _envoy_config_core_v3_TypedExtensionConfig, TypedExtensionConfig__Output as _envoy_config_core_v3_TypedExtensionConfig__Output } from '../../../../envoy/config/core/v3/TypedExtensionConfig'; +import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; +import type { Long } from '@grpc/proto-loader'; + +/** + * A retry back-off strategy that applies when the upstream server rate limits + * the request. + * + * Given this configuration: + * + * .. code-block:: yaml + * + * rate_limited_retry_back_off: + * reset_headers: + * - name: Retry-After + * format: SECONDS + * - name: X-RateLimit-Reset + * format: UNIX_TIMESTAMP + * max_interval: "300s" + * + * The following algorithm will apply: + * + * 1. If the response contains the header ``Retry-After`` its value must be on + * the form ``120`` (an integer that represents the number of seconds to + * wait before retrying). If so, this value is used as the back-off interval. + * 2. Otherwise, if the response contains the header ``X-RateLimit-Reset`` its + * value must be on the form ``1595320702`` (an integer that represents the + * point in time at which to retry, as a Unix timestamp in seconds). If so, + * the current time is subtracted from this value and the result is used as + * the back-off interval. + * 3. Otherwise, Envoy will use the default + * :ref:`exponential back-off ` + * strategy. + * + * No matter which format is used, if the resulting back-off interval exceeds + * ``max_interval`` it is discarded and the next header in ``reset_headers`` + * is tried. If a request timeout is configured for the route it will further + * limit how long the request will be allowed to run. 
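As a rough TypeScript counterpart to the YAML example above, a value of the generated ``RateLimitedRetryBackOff`` shape might be written as in the sketch below. The import path and the ``seconds``/``nanos`` fields of the ``Duration`` wrapper are assumptions; the header names and formats mirror the example in this comment.

.. code-block:: typescript

   // Sketch only: mirrors the YAML example in the surrounding documentation.
   import type { _envoy_config_route_v3_RetryPolicy_RateLimitedRetryBackOff as RateLimitedRetryBackOff }
     from './generated/envoy/config/route/v3/RetryPolicy';

   const rateLimitedBackOff: RateLimitedRetryBackOff = {
     reset_headers: [
       { name: 'Retry-After', format: 'SECONDS' },
       { name: 'X-RateLimit-Reset', format: 'UNIX_TIMESTAMP' },
     ],
     max_interval: { seconds: 300 },  // assumed google.protobuf.Duration shape
   };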
+ * + * To prevent many clients retrying at the same point in time jitter is added + * to the back-off interval, so the resulting interval is decided by taking: + * ``random(interval, interval * 1.5)``. + * + * .. attention:: + * + * Configuring ``rate_limited_retry_back_off`` will not by itself cause a request + * to be retried. You will still need to configure the right retry policy to match + * the responses from the upstream server. + */ +export interface _envoy_config_route_v3_RetryPolicy_RateLimitedRetryBackOff { + /** + * Specifies the reset headers (like ``Retry-After`` or ``X-RateLimit-Reset``) + * to match against the response. Headers are tried in order, and matched case + * insensitive. The first header to be parsed successfully is used. If no headers + * match the default exponential back-off is used instead. + */ + 'reset_headers'?: (_envoy_config_route_v3_RetryPolicy_ResetHeader)[]; + /** + * Specifies the maximum back off interval that Envoy will allow. If a reset + * header contains an interval longer than this then it will be discarded and + * the next header will be tried. Defaults to 300 seconds. + */ + 'max_interval'?: (_google_protobuf_Duration | null); +} + +/** + * A retry back-off strategy that applies when the upstream server rate limits + * the request. + * + * Given this configuration: + * + * .. code-block:: yaml + * + * rate_limited_retry_back_off: + * reset_headers: + * - name: Retry-After + * format: SECONDS + * - name: X-RateLimit-Reset + * format: UNIX_TIMESTAMP + * max_interval: "300s" + * + * The following algorithm will apply: + * + * 1. If the response contains the header ``Retry-After`` its value must be on + * the form ``120`` (an integer that represents the number of seconds to + * wait before retrying). If so, this value is used as the back-off interval. + * 2. Otherwise, if the response contains the header ``X-RateLimit-Reset`` its + * value must be on the form ``1595320702`` (an integer that represents the + * point in time at which to retry, as a Unix timestamp in seconds). If so, + * the current time is subtracted from this value and the result is used as + * the back-off interval. + * 3. Otherwise, Envoy will use the default + * :ref:`exponential back-off ` + * strategy. + * + * No matter which format is used, if the resulting back-off interval exceeds + * ``max_interval`` it is discarded and the next header in ``reset_headers`` + * is tried. If a request timeout is configured for the route it will further + * limit how long the request will be allowed to run. + * + * To prevent many clients retrying at the same point in time jitter is added + * to the back-off interval, so the resulting interval is decided by taking: + * ``random(interval, interval * 1.5)``. + * + * .. attention:: + * + * Configuring ``rate_limited_retry_back_off`` will not by itself cause a request + * to be retried. You will still need to configure the right retry policy to match + * the responses from the upstream server. + */ +export interface _envoy_config_route_v3_RetryPolicy_RateLimitedRetryBackOff__Output { + /** + * Specifies the reset headers (like ``Retry-After`` or ``X-RateLimit-Reset``) + * to match against the response. Headers are tried in order, and matched case + * insensitive. The first header to be parsed successfully is used. If no headers + * match the default exponential back-off is used instead. + */ + 'reset_headers': (_envoy_config_route_v3_RetryPolicy_ResetHeader__Output)[]; + /** + * Specifies the maximum back off interval that Envoy will allow. 
If a reset + * header contains an interval longer than this then it will be discarded and + * the next header will be tried. Defaults to 300 seconds. + */ + 'max_interval': (_google_protobuf_Duration__Output | null); +} + +export interface _envoy_config_route_v3_RetryPolicy_ResetHeader { + /** + * The name of the reset header. + * + * .. note:: + * + * If the header appears multiple times only the first value is used. + */ + 'name'?: (string); + /** + * The format of the reset header. + */ + 'format'?: (_envoy_config_route_v3_RetryPolicy_ResetHeaderFormat); +} + +export interface _envoy_config_route_v3_RetryPolicy_ResetHeader__Output { + /** + * The name of the reset header. + * + * .. note:: + * + * If the header appears multiple times only the first value is used. + */ + 'name': (string); + /** + * The format of the reset header. + */ + 'format': (_envoy_config_route_v3_RetryPolicy_ResetHeaderFormat__Output); +} + +// Original file: deps/envoy-api/envoy/config/route/v3/route_components.proto + +export const _envoy_config_route_v3_RetryPolicy_ResetHeaderFormat = { + SECONDS: 'SECONDS', + UNIX_TIMESTAMP: 'UNIX_TIMESTAMP', +} as const; + +export type _envoy_config_route_v3_RetryPolicy_ResetHeaderFormat = + | 'SECONDS' + | 0 + | 'UNIX_TIMESTAMP' + | 1 + +export type _envoy_config_route_v3_RetryPolicy_ResetHeaderFormat__Output = typeof _envoy_config_route_v3_RetryPolicy_ResetHeaderFormat[keyof typeof _envoy_config_route_v3_RetryPolicy_ResetHeaderFormat] + +export interface _envoy_config_route_v3_RetryPolicy_RetryBackOff { + /** + * Specifies the base interval between retries. This parameter is required and must be greater + * than zero. Values less than 1 ms are rounded up to 1 ms. + * See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's + * back-off algorithm. + */ + 'base_interval'?: (_google_protobuf_Duration | null); + /** + * Specifies the maximum interval between retries. This parameter is optional, but must be + * greater than or equal to the ``base_interval`` if set. The default is 10 times the + * ``base_interval``. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion + * of Envoy's back-off algorithm. + */ + 'max_interval'?: (_google_protobuf_Duration | null); +} + +export interface _envoy_config_route_v3_RetryPolicy_RetryBackOff__Output { + /** + * Specifies the base interval between retries. This parameter is required and must be greater + * than zero. Values less than 1 ms are rounded up to 1 ms. + * See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's + * back-off algorithm. + */ + 'base_interval': (_google_protobuf_Duration__Output | null); + /** + * Specifies the maximum interval between retries. This parameter is optional, but must be + * greater than or equal to the ``base_interval`` if set. The default is 10 times the + * ``base_interval``. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion + * of Envoy's back-off algorithm. 
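For comparison, a plain exponential back-off block of the generated ``RetryBackOff`` shape could be sketched as follows; the import path and the ``Duration`` field names are assumptions, and the 25 ms base interval simply echoes the default mentioned elsewhere in this file.

.. code-block:: typescript

   // Sketch only; values are illustrative.
   import type { _envoy_config_route_v3_RetryPolicy_RetryBackOff as RetryBackOff }
     from './generated/envoy/config/route/v3/RetryPolicy';

   const backOff: RetryBackOff = {
     base_interval: { nanos: 25000000 },  // 25 ms, assumed Duration shape
     max_interval: { seconds: 1 },        // must be >= base_interval if set
   };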
+ */ + 'max_interval': (_google_protobuf_Duration__Output | null); +} + +export interface _envoy_config_route_v3_RetryPolicy_RetryHostPredicate { + 'name'?: (string); + 'typed_config'?: (_google_protobuf_Any | null); + /** + * [#extension-category: envoy.retry_host_predicates] + */ + 'config_type'?: "typed_config"; +} + +export interface _envoy_config_route_v3_RetryPolicy_RetryHostPredicate__Output { + 'name': (string); + 'typed_config'?: (_google_protobuf_Any__Output | null); + /** + * [#extension-category: envoy.retry_host_predicates] + */ + 'config_type': "typed_config"; +} + +export interface _envoy_config_route_v3_RetryPolicy_RetryPriority { + 'name'?: (string); + 'typed_config'?: (_google_protobuf_Any | null); + /** + * [#extension-category: envoy.retry_priorities] + */ + 'config_type'?: "typed_config"; +} + +export interface _envoy_config_route_v3_RetryPolicy_RetryPriority__Output { + 'name': (string); + 'typed_config'?: (_google_protobuf_Any__Output | null); + /** + * [#extension-category: envoy.retry_priorities] + */ + 'config_type': "typed_config"; +} + +/** + * HTTP retry :ref:`architecture overview `. + * [#next-free-field: 14] + */ +export interface RetryPolicy { + /** + * Specifies the conditions under which retry takes place. These are the same + * conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and + * :ref:`config_http_filters_router_x-envoy-retry-grpc-on`. + */ + 'retry_on'?: (string); + /** + * Specifies the allowed number of retries. This parameter is optional and + * defaults to 1. These are the same conditions documented for + * :ref:`config_http_filters_router_x-envoy-max-retries`. + */ + 'num_retries'?: (_google_protobuf_UInt32Value | null); + /** + * Specifies a non-zero upstream timeout per retry attempt (including the initial attempt). This + * parameter is optional. The same conditions documented for + * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply. + * + * .. note:: + * + * If left unspecified, Envoy will use the global + * :ref:`route timeout ` for the request. + * Consequently, when using a :ref:`5xx ` based + * retry policy, a request that times out will not be retried as the total timeout budget + * would have been exhausted. + */ + 'per_try_timeout'?: (_google_protobuf_Duration | null); + /** + * Specifies an implementation of a RetryPriority which is used to determine the + * distribution of load across priorities used for retries. Refer to + * :ref:`retry plugin configuration ` for more details. + */ + 'retry_priority'?: (_envoy_config_route_v3_RetryPolicy_RetryPriority | null); + /** + * Specifies a collection of RetryHostPredicates that will be consulted when selecting a host + * for retries. If any of the predicates reject the host, host selection will be reattempted. + * Refer to :ref:`retry plugin configuration ` for more + * details. + */ + 'retry_host_predicate'?: (_envoy_config_route_v3_RetryPolicy_RetryHostPredicate)[]; + /** + * The maximum number of times host selection will be reattempted before giving up, at which + * point the host that was last selected will be routed to. If unspecified, this will default to + * retrying once. + */ + 'host_selection_retry_max_attempts'?: (number | string | Long); + /** + * HTTP status codes that should trigger a retry in addition to those specified by retry_on. + */ + 'retriable_status_codes'?: (number)[]; + /** + * Specifies parameters that control exponential retry back off. 
This parameter is optional, in which case the + * default base interval is 25 milliseconds or, if set, the current value of the + * ``upstream.base_retry_backoff_ms`` runtime parameter. The default maximum interval is 10 times + * the base interval. The documentation for :ref:`config_http_filters_router_x-envoy-max-retries` + * describes Envoy's back-off algorithm. + */ + 'retry_back_off'?: (_envoy_config_route_v3_RetryPolicy_RetryBackOff | null); + /** + * HTTP response headers that trigger a retry if present in the response. A retry will be + * triggered if any of the header matches match the upstream response headers. + * The field is only consulted if 'retriable-headers' retry policy is active. + */ + 'retriable_headers'?: (_envoy_config_route_v3_HeaderMatcher)[]; + /** + * HTTP headers which must be present in the request for retries to be attempted. + */ + 'retriable_request_headers'?: (_envoy_config_route_v3_HeaderMatcher)[]; + /** + * Specifies parameters that control a retry back-off strategy that is used + * when the request is rate limited by the upstream server. The server may + * return a response header like ``Retry-After`` or ``X-RateLimit-Reset`` to + * provide feedback to the client on how long to wait before retrying. If + * configured, this back-off strategy will be used instead of the + * default exponential back off strategy (configured using ``retry_back_off``) + * whenever a response includes the matching headers. + */ + 'rate_limited_retry_back_off'?: (_envoy_config_route_v3_RetryPolicy_RateLimitedRetryBackOff | null); + /** + * Retry options predicates that will be applied prior to retrying a request. These predicates + * allow customizing request behavior between retries. + * [#comment: add [#extension-category: envoy.retry_options_predicates] when there are built-in extensions] + */ + 'retry_options_predicates'?: (_envoy_config_core_v3_TypedExtensionConfig)[]; + /** + * Specifies an upstream idle timeout per retry attempt (including the initial attempt). This + * parameter is optional and if absent there is no per try idle timeout. The semantics of the per + * try idle timeout are similar to the + * :ref:`route idle timeout ` and + * :ref:`stream idle timeout + * ` + * both enforced by the HTTP connection manager. The difference is that this idle timeout + * is enforced by the router for each individual attempt and thus after all previous filters have + * run, as opposed to *before* all previous filters run for the other idle timeouts. This timeout + * is useful in cases in which total request timeout is bounded by a number of retries and a + * :ref:`per_try_timeout `, but + * there is a desire to ensure each try is making incremental progress. Note also that similar + * to :ref:`per_try_timeout `, + * this idle timeout does not start until after both the entire request has been received by the + * router *and* a connection pool connection has been obtained. Unlike + * :ref:`per_try_timeout `, + * the idle timer continues once the response starts streaming back to the downstream client. + * This ensures that response data continues to make progress without using one of the HTTP + * connection manager idle timeouts. + */ + 'per_try_idle_timeout'?: (_google_protobuf_Duration | null); +} + +/** + * HTTP retry :ref:`architecture overview `. + * [#next-free-field: 14] + */ +export interface RetryPolicy__Output { + /** + * Specifies the conditions under which retry takes place. 
These are the same + * conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and + * :ref:`config_http_filters_router_x-envoy-retry-grpc-on`. + */ + 'retry_on': (string); + /** + * Specifies the allowed number of retries. This parameter is optional and + * defaults to 1. These are the same conditions documented for + * :ref:`config_http_filters_router_x-envoy-max-retries`. + */ + 'num_retries': (_google_protobuf_UInt32Value__Output | null); + /** + * Specifies a non-zero upstream timeout per retry attempt (including the initial attempt). This + * parameter is optional. The same conditions documented for + * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply. + * + * .. note:: + * + * If left unspecified, Envoy will use the global + * :ref:`route timeout ` for the request. + * Consequently, when using a :ref:`5xx ` based + * retry policy, a request that times out will not be retried as the total timeout budget + * would have been exhausted. + */ + 'per_try_timeout': (_google_protobuf_Duration__Output | null); + /** + * Specifies an implementation of a RetryPriority which is used to determine the + * distribution of load across priorities used for retries. Refer to + * :ref:`retry plugin configuration ` for more details. + */ + 'retry_priority': (_envoy_config_route_v3_RetryPolicy_RetryPriority__Output | null); + /** + * Specifies a collection of RetryHostPredicates that will be consulted when selecting a host + * for retries. If any of the predicates reject the host, host selection will be reattempted. + * Refer to :ref:`retry plugin configuration ` for more + * details. + */ + 'retry_host_predicate': (_envoy_config_route_v3_RetryPolicy_RetryHostPredicate__Output)[]; + /** + * The maximum number of times host selection will be reattempted before giving up, at which + * point the host that was last selected will be routed to. If unspecified, this will default to + * retrying once. + */ + 'host_selection_retry_max_attempts': (string); + /** + * HTTP status codes that should trigger a retry in addition to those specified by retry_on. + */ + 'retriable_status_codes': (number)[]; + /** + * Specifies parameters that control exponential retry back off. This parameter is optional, in which case the + * default base interval is 25 milliseconds or, if set, the current value of the + * ``upstream.base_retry_backoff_ms`` runtime parameter. The default maximum interval is 10 times + * the base interval. The documentation for :ref:`config_http_filters_router_x-envoy-max-retries` + * describes Envoy's back-off algorithm. + */ + 'retry_back_off': (_envoy_config_route_v3_RetryPolicy_RetryBackOff__Output | null); + /** + * HTTP response headers that trigger a retry if present in the response. A retry will be + * triggered if any of the header matches match the upstream response headers. + * The field is only consulted if 'retriable-headers' retry policy is active. + */ + 'retriable_headers': (_envoy_config_route_v3_HeaderMatcher__Output)[]; + /** + * HTTP headers which must be present in the request for retries to be attempted. + */ + 'retriable_request_headers': (_envoy_config_route_v3_HeaderMatcher__Output)[]; + /** + * Specifies parameters that control a retry back-off strategy that is used + * when the request is rate limited by the upstream server. The server may + * return a response header like ``Retry-After`` or ``X-RateLimit-Reset`` to + * provide feedback to the client on how long to wait before retrying. 
If + * configured, this back-off strategy will be used instead of the + * default exponential back off strategy (configured using ``retry_back_off``) + * whenever a response includes the matching headers. + */ + 'rate_limited_retry_back_off': (_envoy_config_route_v3_RetryPolicy_RateLimitedRetryBackOff__Output | null); + /** + * Retry options predicates that will be applied prior to retrying a request. These predicates + * allow customizing request behavior between retries. + * [#comment: add [#extension-category: envoy.retry_options_predicates] when there are built-in extensions] + */ + 'retry_options_predicates': (_envoy_config_core_v3_TypedExtensionConfig__Output)[]; + /** + * Specifies an upstream idle timeout per retry attempt (including the initial attempt). This + * parameter is optional and if absent there is no per try idle timeout. The semantics of the per + * try idle timeout are similar to the + * :ref:`route idle timeout ` and + * :ref:`stream idle timeout + * ` + * both enforced by the HTTP connection manager. The difference is that this idle timeout + * is enforced by the router for each individual attempt and thus after all previous filters have + * run, as opposed to *before* all previous filters run for the other idle timeouts. This timeout + * is useful in cases in which total request timeout is bounded by a number of retries and a + * :ref:`per_try_timeout `, but + * there is a desire to ensure each try is making incremental progress. Note also that similar + * to :ref:`per_try_timeout `, + * this idle timeout does not start until after both the entire request has been received by the + * router *and* a connection pool connection has been obtained. Unlike + * :ref:`per_try_timeout `, + * the idle timer continues once the response starts streaming back to the downstream client. + * This ensures that response data continues to make progress without using one of the HTTP + * connection manager idle timeouts. 
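Putting these fields together, a small ``RetryPolicy`` value of the generated (non-``__Output``) shape might look like the sketch below; the import path and the concrete values are assumptions, while the field names are the ones defined in this file.

.. code-block:: typescript

   // Sketch only: retry gRPC "unavailable" responses up to three times.
   import type { RetryPolicy } from './generated/envoy/config/route/v3/RetryPolicy';

   const retryPolicy: RetryPolicy = {
     retry_on: 'unavailable',          // same syntax as x-envoy-retry-grpc-on
     num_retries: { value: 3 },        // google.protobuf.UInt32Value wrapper shape
     per_try_timeout: { seconds: 2 },  // assumed google.protobuf.Duration shape
   };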
+ */ + 'per_try_idle_timeout': (_google_protobuf_Duration__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/route/v3/Route.ts b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/Route.ts new file mode 100644 index 000000000..beda9395d --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/Route.ts @@ -0,0 +1,276 @@ +// Original file: deps/envoy-api/envoy/config/route/v3/route_components.proto + +import type { RouteMatch as _envoy_config_route_v3_RouteMatch, RouteMatch__Output as _envoy_config_route_v3_RouteMatch__Output } from '../../../../envoy/config/route/v3/RouteMatch'; +import type { RouteAction as _envoy_config_route_v3_RouteAction, RouteAction__Output as _envoy_config_route_v3_RouteAction__Output } from '../../../../envoy/config/route/v3/RouteAction'; +import type { RedirectAction as _envoy_config_route_v3_RedirectAction, RedirectAction__Output as _envoy_config_route_v3_RedirectAction__Output } from '../../../../envoy/config/route/v3/RedirectAction'; +import type { Metadata as _envoy_config_core_v3_Metadata, Metadata__Output as _envoy_config_core_v3_Metadata__Output } from '../../../../envoy/config/core/v3/Metadata'; +import type { Decorator as _envoy_config_route_v3_Decorator, Decorator__Output as _envoy_config_route_v3_Decorator__Output } from '../../../../envoy/config/route/v3/Decorator'; +import type { DirectResponseAction as _envoy_config_route_v3_DirectResponseAction, DirectResponseAction__Output as _envoy_config_route_v3_DirectResponseAction__Output } from '../../../../envoy/config/route/v3/DirectResponseAction'; +import type { HeaderValueOption as _envoy_config_core_v3_HeaderValueOption, HeaderValueOption__Output as _envoy_config_core_v3_HeaderValueOption__Output } from '../../../../envoy/config/core/v3/HeaderValueOption'; +import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; +import type { Tracing as _envoy_config_route_v3_Tracing, Tracing__Output as _envoy_config_route_v3_Tracing__Output } from '../../../../envoy/config/route/v3/Tracing'; +import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; +import type { FilterAction as _envoy_config_route_v3_FilterAction, FilterAction__Output as _envoy_config_route_v3_FilterAction__Output } from '../../../../envoy/config/route/v3/FilterAction'; +import type { NonForwardingAction as _envoy_config_route_v3_NonForwardingAction, NonForwardingAction__Output as _envoy_config_route_v3_NonForwardingAction__Output } from '../../../../envoy/config/route/v3/NonForwardingAction'; + +/** + * A route is both a specification of how to match a request as well as an indication of what to do + * next (e.g., redirect, forward, rewrite, etc.). + * + * .. attention:: + * + * Envoy supports routing on HTTP method via :ref:`header matching + * `. + * [#next-free-field: 20] + */ +export interface Route { + /** + * Route matching parameters. + */ + 'match'?: (_envoy_config_route_v3_RouteMatch | null); + /** + * Route request to some upstream cluster. + */ + 'route'?: (_envoy_config_route_v3_RouteAction | null); + /** + * Return a redirect. + */ + 'redirect'?: (_envoy_config_route_v3_RedirectAction | null); + /** + * The Metadata field can be used to provide additional information + * about the route. It can be used for configuration, stats, and logging. 
+ * The metadata should go under the filter namespace that will need it. + * For instance, if the metadata is intended for the Router filter, + * the filter name should be specified as ``envoy.filters.http.router``. + */ + 'metadata'?: (_envoy_config_core_v3_Metadata | null); + /** + * Decorator for the matched route. + */ + 'decorator'?: (_envoy_config_route_v3_Decorator | null); + /** + * Return an arbitrary HTTP response directly, without proxying. + */ + 'direct_response'?: (_envoy_config_route_v3_DirectResponseAction | null); + /** + * Specifies a set of headers that will be added to requests matching this + * route. Headers specified at this level are applied before headers from the + * enclosing :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` and + * :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on + * header value syntax, see the documentation on :ref:`custom request headers + * `. + */ + 'request_headers_to_add'?: (_envoy_config_core_v3_HeaderValueOption)[]; + /** + * Specifies a set of headers that will be added to responses to requests + * matching this route. Headers specified at this level are applied before + * headers from the enclosing :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` and + * :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including + * details on header value syntax, see the documentation on + * :ref:`custom request headers `. + */ + 'response_headers_to_add'?: (_envoy_config_core_v3_HeaderValueOption)[]; + /** + * Specifies a list of HTTP headers that should be removed from each response + * to requests matching this route. + */ + 'response_headers_to_remove'?: (string)[]; + /** + * Specifies a list of HTTP headers that should be removed from each request + * matching this route. + */ + 'request_headers_to_remove'?: (string)[]; + /** + * The per_filter_config field can be used to provide route-specific configurations for filters. + * The key should match the :ref:`filter config name + * `. + * The canonical filter name (e.g., ``envoy.filters.http.buffer`` for the HTTP buffer filter) can also + * be used for the backwards compatibility. If there is no entry referred by the filter config name, the + * entry referred by the canonical filter name will be provided to the filters as fallback. + * + * Use of this field is filter specific; + * see the :ref:`HTTP filter documentation ` for if and how it is utilized. + * [#comment: An entry's value may be wrapped in a + * :ref:`FilterConfig` + * message to specify additional options.] + */ + 'typed_per_filter_config'?: ({[key: string]: _google_protobuf_Any}); + /** + * Name for the route. + */ + 'name'?: (string); + /** + * Presence of the object defines whether the connection manager's tracing configuration + * is overridden by this route specific instance. + */ + 'tracing'?: (_envoy_config_route_v3_Tracing | null); + /** + * The maximum bytes which will be buffered for retries and shadowing. + * If set, the bytes actually buffered will be the minimum value of this and the + * listener per_connection_buffer_limit_bytes. + */ + 'per_request_buffer_limit_bytes'?: (_google_protobuf_UInt32Value | null); + /** + * [#not-implemented-hide:] + * A filter-defined action (e.g., it could dynamically generate the RouteAction). 
+ * [#comment: TODO(samflattery): Remove cleanup in route_fuzz_test.cc when + * implemented] + */ + 'filter_action'?: (_envoy_config_route_v3_FilterAction | null); + /** + * [#not-implemented-hide:] + * An action used when the route will generate a response directly, + * without forwarding to an upstream host. This will be used in non-proxy + * xDS clients like the gRPC server. It could also be used in the future + * in Envoy for a filter that directly generates responses for requests. + */ + 'non_forwarding_action'?: (_envoy_config_route_v3_NonForwardingAction | null); + /** + * The human readable prefix to use when emitting statistics for this endpoint. + * The statistics are rooted at vhost..route.. + * This should be set for highly critical + * endpoints that one wishes to get “per-route” statistics on. + * If not set, endpoint statistics are not generated. + * + * The emitted statistics are the same as those documented for :ref:`virtual clusters `. + * + * .. warning:: + * + * We do not recommend setting up a stat prefix for + * every application endpoint. This is both not easily maintainable and + * statistics use a non-trivial amount of memory(approximately 1KiB per route). + */ + 'stat_prefix'?: (string); + 'action'?: "route"|"redirect"|"direct_response"|"filter_action"|"non_forwarding_action"; +} + +/** + * A route is both a specification of how to match a request as well as an indication of what to do + * next (e.g., redirect, forward, rewrite, etc.). + * + * .. attention:: + * + * Envoy supports routing on HTTP method via :ref:`header matching + * `. + * [#next-free-field: 20] + */ +export interface Route__Output { + /** + * Route matching parameters. + */ + 'match': (_envoy_config_route_v3_RouteMatch__Output | null); + /** + * Route request to some upstream cluster. + */ + 'route'?: (_envoy_config_route_v3_RouteAction__Output | null); + /** + * Return a redirect. + */ + 'redirect'?: (_envoy_config_route_v3_RedirectAction__Output | null); + /** + * The Metadata field can be used to provide additional information + * about the route. It can be used for configuration, stats, and logging. + * The metadata should go under the filter namespace that will need it. + * For instance, if the metadata is intended for the Router filter, + * the filter name should be specified as ``envoy.filters.http.router``. + */ + 'metadata': (_envoy_config_core_v3_Metadata__Output | null); + /** + * Decorator for the matched route. + */ + 'decorator': (_envoy_config_route_v3_Decorator__Output | null); + /** + * Return an arbitrary HTTP response directly, without proxying. + */ + 'direct_response'?: (_envoy_config_route_v3_DirectResponseAction__Output | null); + /** + * Specifies a set of headers that will be added to requests matching this + * route. Headers specified at this level are applied before headers from the + * enclosing :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` and + * :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on + * header value syntax, see the documentation on :ref:`custom request headers + * `. + */ + 'request_headers_to_add': (_envoy_config_core_v3_HeaderValueOption__Output)[]; + /** + * Specifies a set of headers that will be added to responses to requests + * matching this route. Headers specified at this level are applied before + * headers from the enclosing :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` and + * :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. 
For more information, including + * details on header value syntax, see the documentation on + * :ref:`custom request headers `. + */ + 'response_headers_to_add': (_envoy_config_core_v3_HeaderValueOption__Output)[]; + /** + * Specifies a list of HTTP headers that should be removed from each response + * to requests matching this route. + */ + 'response_headers_to_remove': (string)[]; + /** + * Specifies a list of HTTP headers that should be removed from each request + * matching this route. + */ + 'request_headers_to_remove': (string)[]; + /** + * The per_filter_config field can be used to provide route-specific configurations for filters. + * The key should match the :ref:`filter config name + * `. + * The canonical filter name (e.g., ``envoy.filters.http.buffer`` for the HTTP buffer filter) can also + * be used for the backwards compatibility. If there is no entry referred by the filter config name, the + * entry referred by the canonical filter name will be provided to the filters as fallback. + * + * Use of this field is filter specific; + * see the :ref:`HTTP filter documentation ` for if and how it is utilized. + * [#comment: An entry's value may be wrapped in a + * :ref:`FilterConfig` + * message to specify additional options.] + */ + 'typed_per_filter_config': ({[key: string]: _google_protobuf_Any__Output}); + /** + * Name for the route. + */ + 'name': (string); + /** + * Presence of the object defines whether the connection manager's tracing configuration + * is overridden by this route specific instance. + */ + 'tracing': (_envoy_config_route_v3_Tracing__Output | null); + /** + * The maximum bytes which will be buffered for retries and shadowing. + * If set, the bytes actually buffered will be the minimum value of this and the + * listener per_connection_buffer_limit_bytes. + */ + 'per_request_buffer_limit_bytes': (_google_protobuf_UInt32Value__Output | null); + /** + * [#not-implemented-hide:] + * A filter-defined action (e.g., it could dynamically generate the RouteAction). + * [#comment: TODO(samflattery): Remove cleanup in route_fuzz_test.cc when + * implemented] + */ + 'filter_action'?: (_envoy_config_route_v3_FilterAction__Output | null); + /** + * [#not-implemented-hide:] + * An action used when the route will generate a response directly, + * without forwarding to an upstream host. This will be used in non-proxy + * xDS clients like the gRPC server. It could also be used in the future + * in Envoy for a filter that directly generates responses for requests. + */ + 'non_forwarding_action'?: (_envoy_config_route_v3_NonForwardingAction__Output | null); + /** + * The human readable prefix to use when emitting statistics for this endpoint. + * The statistics are rooted at vhost..route.. + * This should be set for highly critical + * endpoints that one wishes to get “per-route” statistics on. + * If not set, endpoint statistics are not generated. + * + * The emitted statistics are the same as those documented for :ref:`virtual clusters `. + * + * .. warning:: + * + * We do not recommend setting up a stat prefix for + * every application endpoint. This is both not easily maintainable and + * statistics use a non-trivial amount of memory(approximately 1KiB per route). 
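For orientation, a minimal ``Route`` of the generated (non-``__Output``) shape, pairing a match with a cluster-routing action, might be sketched as below. The import path is an assumption, and the ``prefix`` field of ``RouteMatch`` and the ``cluster`` field of ``RouteAction`` come from the corresponding Envoy v3 messages rather than from this excerpt.

.. code-block:: typescript

   // Sketch only; "backend" is an invented cluster name.
   import type { Route } from './generated/envoy/config/route/v3/Route';

   const defaultRoute: Route = {
     name: 'default_route',
     match: { prefix: '/' },         // RouteMatch.prefix (not shown in this excerpt)
     route: { cluster: 'backend' },  // RouteAction.cluster (not shown in this excerpt)
   };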
+ */ + 'stat_prefix': (string); + 'action': "route"|"redirect"|"direct_response"|"filter_action"|"non_forwarding_action"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/route/v3/RouteAction.ts b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/RouteAction.ts new file mode 100644 index 000000000..fd38da92e --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/RouteAction.ts @@ -0,0 +1,1385 @@ +// Original file: deps/envoy-api/envoy/config/route/v3/route_components.proto + +import type { WeightedCluster as _envoy_config_route_v3_WeightedCluster, WeightedCluster__Output as _envoy_config_route_v3_WeightedCluster__Output } from '../../../../envoy/config/route/v3/WeightedCluster'; +import type { Metadata as _envoy_config_core_v3_Metadata, Metadata__Output as _envoy_config_core_v3_Metadata__Output } from '../../../../envoy/config/core/v3/Metadata'; +import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../../google/protobuf/BoolValue'; +import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../../google/protobuf/Duration'; +import type { RetryPolicy as _envoy_config_route_v3_RetryPolicy, RetryPolicy__Output as _envoy_config_route_v3_RetryPolicy__Output } from '../../../../envoy/config/route/v3/RetryPolicy'; +import type { RoutingPriority as _envoy_config_core_v3_RoutingPriority, RoutingPriority__Output as _envoy_config_core_v3_RoutingPriority__Output } from '../../../../envoy/config/core/v3/RoutingPriority'; +import type { RateLimit as _envoy_config_route_v3_RateLimit, RateLimit__Output as _envoy_config_route_v3_RateLimit__Output } from '../../../../envoy/config/route/v3/RateLimit'; +import type { CorsPolicy as _envoy_config_route_v3_CorsPolicy, CorsPolicy__Output as _envoy_config_route_v3_CorsPolicy__Output } from '../../../../envoy/config/route/v3/CorsPolicy'; +import type { HedgePolicy as _envoy_config_route_v3_HedgePolicy, HedgePolicy__Output as _envoy_config_route_v3_HedgePolicy__Output } from '../../../../envoy/config/route/v3/HedgePolicy'; +import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; +import type { RegexMatchAndSubstitute as _envoy_type_matcher_v3_RegexMatchAndSubstitute, RegexMatchAndSubstitute__Output as _envoy_type_matcher_v3_RegexMatchAndSubstitute__Output } from '../../../../envoy/type/matcher/v3/RegexMatchAndSubstitute'; +import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; +import type { InternalRedirectPolicy as _envoy_config_route_v3_InternalRedirectPolicy, InternalRedirectPolicy__Output as _envoy_config_route_v3_InternalRedirectPolicy__Output } from '../../../../envoy/config/route/v3/InternalRedirectPolicy'; +import type { ClusterSpecifierPlugin as _envoy_config_route_v3_ClusterSpecifierPlugin, ClusterSpecifierPlugin__Output as _envoy_config_route_v3_ClusterSpecifierPlugin__Output } from '../../../../envoy/config/route/v3/ClusterSpecifierPlugin'; +import type { TypedExtensionConfig as _envoy_config_core_v3_TypedExtensionConfig, TypedExtensionConfig__Output as _envoy_config_core_v3_TypedExtensionConfig__Output } from '../../../../envoy/config/core/v3/TypedExtensionConfig'; +import type { RuntimeFractionalPercent as _envoy_config_core_v3_RuntimeFractionalPercent, RuntimeFractionalPercent__Output as 
_envoy_config_core_v3_RuntimeFractionalPercent__Output } from '../../../../envoy/config/core/v3/RuntimeFractionalPercent'; +import type { ProxyProtocolConfig as _envoy_config_core_v3_ProxyProtocolConfig, ProxyProtocolConfig__Output as _envoy_config_core_v3_ProxyProtocolConfig__Output } from '../../../../envoy/config/core/v3/ProxyProtocolConfig'; + +// Original file: deps/envoy-api/envoy/config/route/v3/route_components.proto + +export const _envoy_config_route_v3_RouteAction_ClusterNotFoundResponseCode = { + /** + * HTTP status code - 503 Service Unavailable. + */ + SERVICE_UNAVAILABLE: 'SERVICE_UNAVAILABLE', + /** + * HTTP status code - 404 Not Found. + */ + NOT_FOUND: 'NOT_FOUND', + /** + * HTTP status code - 500 Internal Server Error. + */ + INTERNAL_SERVER_ERROR: 'INTERNAL_SERVER_ERROR', +} as const; + +export type _envoy_config_route_v3_RouteAction_ClusterNotFoundResponseCode = + /** + * HTTP status code - 503 Service Unavailable. + */ + | 'SERVICE_UNAVAILABLE' + | 0 + /** + * HTTP status code - 404 Not Found. + */ + | 'NOT_FOUND' + | 1 + /** + * HTTP status code - 500 Internal Server Error. + */ + | 'INTERNAL_SERVER_ERROR' + | 2 + +export type _envoy_config_route_v3_RouteAction_ClusterNotFoundResponseCode__Output = typeof _envoy_config_route_v3_RouteAction_ClusterNotFoundResponseCode[keyof typeof _envoy_config_route_v3_RouteAction_ClusterNotFoundResponseCode] + +/** + * Configuration for sending data upstream as a raw data payload. This is used for + * CONNECT or POST requests, when forwarding request payload as raw TCP. + */ +export interface _envoy_config_route_v3_RouteAction_UpgradeConfig_ConnectConfig { + /** + * If present, the proxy protocol header will be prepended to the CONNECT payload sent upstream. + */ + 'proxy_protocol_config'?: (_envoy_config_core_v3_ProxyProtocolConfig | null); + /** + * If set, the route will also allow forwarding POST payload as raw TCP. + */ + 'allow_post'?: (boolean); +} + +/** + * Configuration for sending data upstream as a raw data payload. This is used for + * CONNECT or POST requests, when forwarding request payload as raw TCP. + */ +export interface _envoy_config_route_v3_RouteAction_UpgradeConfig_ConnectConfig__Output { + /** + * If present, the proxy protocol header will be prepended to the CONNECT payload sent upstream. + */ + 'proxy_protocol_config': (_envoy_config_core_v3_ProxyProtocolConfig__Output | null); + /** + * If set, the route will also allow forwarding POST payload as raw TCP. + */ + 'allow_post': (boolean); +} + +export interface _envoy_config_route_v3_RouteAction_HashPolicy_ConnectionProperties { + /** + * Hash on source IP address. + */ + 'source_ip'?: (boolean); +} + +export interface _envoy_config_route_v3_RouteAction_HashPolicy_ConnectionProperties__Output { + /** + * Hash on source IP address. + */ + 'source_ip': (boolean); +} + +/** + * Envoy supports two types of cookie affinity: + * + * 1. Passive. Envoy takes a cookie that's present in the cookies header and + * hashes on its value. + * + * 2. Generated. Envoy generates and sets a cookie with an expiration (TTL) + * on the first request from the client in its response to the client, + * based on the endpoint the request gets sent to. The client then + * presents this on the next and all subsequent requests. The hash of + * this is sufficient to ensure these requests get sent to the same + * endpoint. 
The cookie is generated by hashing the source and + * destination ports and addresses so that multiple independent HTTP2 + * streams on the same connection will independently receive the same + * cookie, even if they arrive at the Envoy simultaneously. + */ +export interface _envoy_config_route_v3_RouteAction_HashPolicy_Cookie { + /** + * The name of the cookie that will be used to obtain the hash key. If the + * cookie is not present and ttl below is not set, no hash will be + * produced. + */ + 'name'?: (string); + /** + * If specified, a cookie with the TTL will be generated if the cookie is + * not present. If the TTL is present and zero, the generated cookie will + * be a session cookie. + */ + 'ttl'?: (_google_protobuf_Duration | null); + /** + * The name of the path for the cookie. If no path is specified here, no path + * will be set for the cookie. + */ + 'path'?: (string); +} + +/** + * Envoy supports two types of cookie affinity: + * + * 1. Passive. Envoy takes a cookie that's present in the cookies header and + * hashes on its value. + * + * 2. Generated. Envoy generates and sets a cookie with an expiration (TTL) + * on the first request from the client in its response to the client, + * based on the endpoint the request gets sent to. The client then + * presents this on the next and all subsequent requests. The hash of + * this is sufficient to ensure these requests get sent to the same + * endpoint. The cookie is generated by hashing the source and + * destination ports and addresses so that multiple independent HTTP2 + * streams on the same connection will independently receive the same + * cookie, even if they arrive at the Envoy simultaneously. + */ +export interface _envoy_config_route_v3_RouteAction_HashPolicy_Cookie__Output { + /** + * The name of the cookie that will be used to obtain the hash key. If the + * cookie is not present and ttl below is not set, no hash will be + * produced. + */ + 'name': (string); + /** + * If specified, a cookie with the TTL will be generated if the cookie is + * not present. If the TTL is present and zero, the generated cookie will + * be a session cookie. + */ + 'ttl': (_google_protobuf_Duration__Output | null); + /** + * The name of the path for the cookie. If no path is specified here, no path + * will be set for the cookie. + */ + 'path': (string); +} + +export interface _envoy_config_route_v3_RouteAction_HashPolicy_FilterState { + /** + * The name of the Object in the per-request filterState, which is an + * Envoy::Hashable object. If there is no data associated with the key, + * or the stored object is not Envoy::Hashable, no hash will be produced. + */ + 'key'?: (string); +} + +export interface _envoy_config_route_v3_RouteAction_HashPolicy_FilterState__Output { + /** + * The name of the Object in the per-request filterState, which is an + * Envoy::Hashable object. If there is no data associated with the key, + * or the stored object is not Envoy::Hashable, no hash will be produced. + */ + 'key': (string); +} + +/** + * Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer + * `. + * [#next-free-field: 7] + */ +export interface _envoy_config_route_v3_RouteAction_HashPolicy { + /** + * Header hash policy. + */ + 'header'?: (_envoy_config_route_v3_RouteAction_HashPolicy_Header | null); + /** + * Cookie hash policy. + */ + 'cookie'?: (_envoy_config_route_v3_RouteAction_HashPolicy_Cookie | null); + /** + * Connection properties hash policy. 
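By way of illustration, a cookie-based hash policy using the generated ``HashPolicy`` and ``Cookie`` shapes defined here could be sketched as follows; the import path, cookie name, and TTL are assumptions.

.. code-block:: typescript

   // Sketch only: hash on a generated "session" cookie with a 1 hour TTL and
   // stop evaluating further policies once this one produces a hash.
   import type { _envoy_config_route_v3_RouteAction_HashPolicy as HashPolicy }
     from './generated/envoy/config/route/v3/RouteAction';

   const cookieHashPolicy: HashPolicy = {
     cookie: {
       name: 'session',
       ttl: { seconds: 3600 },  // assumed google.protobuf.Duration shape
       path: '/',
     },
     terminal: true,
   };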
+ */ + 'connection_properties'?: (_envoy_config_route_v3_RouteAction_HashPolicy_ConnectionProperties | null); + /** + * Query parameter hash policy. + */ + 'query_parameter'?: (_envoy_config_route_v3_RouteAction_HashPolicy_QueryParameter | null); + /** + * Filter state hash policy. + */ + 'filter_state'?: (_envoy_config_route_v3_RouteAction_HashPolicy_FilterState | null); + /** + * The flag that short-circuits the hash computing. This field provides a + * 'fallback' style of configuration: "if a terminal policy doesn't work, + * fallback to rest of the policy list", it saves time when the terminal + * policy works. + * + * If true, and there is already a hash computed, ignore rest of the + * list of hash polices. + * For example, if the following hash methods are configured: + * + * ========= ======== + * specifier terminal + * ========= ======== + * Header A true + * Header B false + * Header C false + * ========= ======== + * + * The generateHash process ends if policy "header A" generates a hash, as + * it's a terminal policy. + */ + 'terminal'?: (boolean); + 'policy_specifier'?: "header"|"cookie"|"connection_properties"|"query_parameter"|"filter_state"; +} + +/** + * Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer + * `. + * [#next-free-field: 7] + */ +export interface _envoy_config_route_v3_RouteAction_HashPolicy__Output { + /** + * Header hash policy. + */ + 'header'?: (_envoy_config_route_v3_RouteAction_HashPolicy_Header__Output | null); + /** + * Cookie hash policy. + */ + 'cookie'?: (_envoy_config_route_v3_RouteAction_HashPolicy_Cookie__Output | null); + /** + * Connection properties hash policy. + */ + 'connection_properties'?: (_envoy_config_route_v3_RouteAction_HashPolicy_ConnectionProperties__Output | null); + /** + * Query parameter hash policy. + */ + 'query_parameter'?: (_envoy_config_route_v3_RouteAction_HashPolicy_QueryParameter__Output | null); + /** + * Filter state hash policy. + */ + 'filter_state'?: (_envoy_config_route_v3_RouteAction_HashPolicy_FilterState__Output | null); + /** + * The flag that short-circuits the hash computing. This field provides a + * 'fallback' style of configuration: "if a terminal policy doesn't work, + * fallback to rest of the policy list", it saves time when the terminal + * policy works. + * + * If true, and there is already a hash computed, ignore rest of the + * list of hash polices. + * For example, if the following hash methods are configured: + * + * ========= ======== + * specifier terminal + * ========= ======== + * Header A true + * Header B false + * Header C false + * ========= ======== + * + * The generateHash process ends if policy "header A" generates a hash, as + * it's a terminal policy. + */ + 'terminal': (boolean); + 'policy_specifier': "header"|"cookie"|"connection_properties"|"query_parameter"|"filter_state"; +} + +export interface _envoy_config_route_v3_RouteAction_HashPolicy_Header { + /** + * The name of the request header that will be used to obtain the hash + * key. If the request header is not present, no hash will be produced. + */ + 'header_name'?: (string); + /** + * If specified, the request header value will be rewritten and used + * to produce the hash key. + */ + 'regex_rewrite'?: (_envoy_type_matcher_v3_RegexMatchAndSubstitute | null); +} + +export interface _envoy_config_route_v3_RouteAction_HashPolicy_Header__Output { + /** + * The name of the request header that will be used to obtain the hash + * key. 
If the request header is not present, no hash will be produced. + */ + 'header_name': (string); + /** + * If specified, the request header value will be rewritten and used + * to produce the hash key. + */ + 'regex_rewrite': (_envoy_type_matcher_v3_RegexMatchAndSubstitute__Output | null); +} + +// Original file: deps/envoy-api/envoy/config/route/v3/route_components.proto + +/** + * Configures :ref:`internal redirect ` behavior. + * [#next-major-version: remove this definition - it's defined in the InternalRedirectPolicy message.] + * @deprecated + */ +export const _envoy_config_route_v3_RouteAction_InternalRedirectAction = { + PASS_THROUGH_INTERNAL_REDIRECT: 'PASS_THROUGH_INTERNAL_REDIRECT', + HANDLE_INTERNAL_REDIRECT: 'HANDLE_INTERNAL_REDIRECT', +} as const; + +/** + * Configures :ref:`internal redirect ` behavior. + * [#next-major-version: remove this definition - it's defined in the InternalRedirectPolicy message.] + * @deprecated + */ +export type _envoy_config_route_v3_RouteAction_InternalRedirectAction = + | 'PASS_THROUGH_INTERNAL_REDIRECT' + | 0 + | 'HANDLE_INTERNAL_REDIRECT' + | 1 + +/** + * Configures :ref:`internal redirect ` behavior. + * [#next-major-version: remove this definition - it's defined in the InternalRedirectPolicy message.] + * @deprecated + */ +export type _envoy_config_route_v3_RouteAction_InternalRedirectAction__Output = typeof _envoy_config_route_v3_RouteAction_InternalRedirectAction[keyof typeof _envoy_config_route_v3_RouteAction_InternalRedirectAction] + +export interface _envoy_config_route_v3_RouteAction_MaxStreamDuration { + /** + * Specifies the maximum duration allowed for streams on the route. If not specified, the value + * from the :ref:`max_stream_duration + * ` field in + * :ref:`HttpConnectionManager.common_http_protocol_options + * ` + * is used. If this field is set explicitly to zero, any + * HttpConnectionManager max_stream_duration timeout will be disabled for + * this route. + */ + 'max_stream_duration'?: (_google_protobuf_Duration | null); + /** + * If present, and the request contains a `grpc-timeout header + * `_, use that value as the + * ``max_stream_duration``, but limit the applied timeout to the maximum value specified here. + * If set to 0, the ``grpc-timeout`` header is used without modification. + */ + 'grpc_timeout_header_max'?: (_google_protobuf_Duration | null); + /** + * If present, Envoy will adjust the timeout provided by the ``grpc-timeout`` header by + * subtracting the provided duration from the header. This is useful for allowing Envoy to set + * its global timeout to be less than that of the deadline imposed by the calling client, which + * makes it more likely that Envoy will handle the timeout instead of having the call canceled + * by the client. If, after applying the offset, the resulting timeout is zero or negative, + * the stream will timeout immediately. + */ + 'grpc_timeout_header_offset'?: (_google_protobuf_Duration | null); +} + +export interface _envoy_config_route_v3_RouteAction_MaxStreamDuration__Output { + /** + * Specifies the maximum duration allowed for streams on the route. If not specified, the value + * from the :ref:`max_stream_duration + * ` field in + * :ref:`HttpConnectionManager.common_http_protocol_options + * ` + * is used. If this field is set explicitly to zero, any + * HttpConnectionManager max_stream_duration timeout will be disabled for + * this route. 
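// Sketch (assumed import path and Duration shape) of a MaxStreamDuration that bounds
// the stream at 60s and honors a client-supplied grpc-timeout header only up to 30s,
// per the field documentation above.
import type { _envoy_config_route_v3_RouteAction_MaxStreamDuration } from './RouteAction';

const maxStreamDuration: _envoy_config_route_v3_RouteAction_MaxStreamDuration = {
  max_stream_duration: { seconds: 60 },
  grpc_timeout_header_max: { seconds: 30 },
};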
+ */ + 'max_stream_duration': (_google_protobuf_Duration__Output | null); + /** + * If present, and the request contains a `grpc-timeout header + * `_, use that value as the + * ``max_stream_duration``, but limit the applied timeout to the maximum value specified here. + * If set to 0, the ``grpc-timeout`` header is used without modification. + */ + 'grpc_timeout_header_max': (_google_protobuf_Duration__Output | null); + /** + * If present, Envoy will adjust the timeout provided by the ``grpc-timeout`` header by + * subtracting the provided duration from the header. This is useful for allowing Envoy to set + * its global timeout to be less than that of the deadline imposed by the calling client, which + * makes it more likely that Envoy will handle the timeout instead of having the call canceled + * by the client. If, after applying the offset, the resulting timeout is zero or negative, + * the stream will timeout immediately. + */ + 'grpc_timeout_header_offset': (_google_protobuf_Duration__Output | null); +} + +export interface _envoy_config_route_v3_RouteAction_HashPolicy_QueryParameter { + /** + * The name of the URL query parameter that will be used to obtain the hash + * key. If the parameter is not present, no hash will be produced. Query + * parameter names are case-sensitive. + */ + 'name'?: (string); +} + +export interface _envoy_config_route_v3_RouteAction_HashPolicy_QueryParameter__Output { + /** + * The name of the URL query parameter that will be used to obtain the hash + * key. If the parameter is not present, no hash will be produced. Query + * parameter names are case-sensitive. + */ + 'name': (string); +} + +/** + * The router is capable of shadowing traffic from one cluster to another. The current + * implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to + * respond before returning the response from the primary cluster. All normal statistics are + * collected for the shadow cluster making this feature useful for testing. + * + * During shadowing, the host/authority header is altered such that ``-shadow`` is appended. This is + * useful for logging. For example, ``cluster1`` becomes ``cluster1-shadow``. + * + * .. note:: + * + * Shadowing will not be triggered if the primary cluster does not exist. + * + * .. note:: + * + * Shadowing doesn't support Http CONNECT and upgrades. + * [#next-free-field: 6] + */ +export interface _envoy_config_route_v3_RouteAction_RequestMirrorPolicy { + /** + * Only one of ``cluster`` and ``cluster_header`` can be specified. + * [#next-major-version: Need to add back the validation rule: (validate.rules).string = {min_len: 1}] + * Specifies the cluster that requests will be mirrored to. The cluster must + * exist in the cluster manager configuration. + */ + 'cluster'?: (string); + /** + * Only one of ``cluster`` and ``cluster_header`` can be specified. + * Envoy will determine the cluster to route to by reading the value of the + * HTTP header named by cluster_header from the request headers. Only the first value in header is used, + * and no shadow request will happen if the value is not found in headers. Envoy will not wait for + * the shadow cluster to respond before returning the response from the primary cluster. + * + * .. attention:: + * + * Internally, Envoy always uses the HTTP/2 ``:authority`` header to represent the HTTP/1 + * ``Host`` header. Thus, if attempting to match on ``Host``, match on ``:authority`` instead. + * + * .. 
note:: + * + * If the header appears multiple times only the first value is used. + */ + 'cluster_header'?: (string); + /** + * If not specified, all requests to the target cluster will be mirrored. + * + * If specified, this field takes precedence over the ``runtime_key`` field and requests must also + * fall under the percentage of matches indicated by this field. + * + * For some fraction N/D, a random number in the range [0,D) is selected. If the + * number is <= the value of the numerator N, or if the key is not present, the default + * value, the request will be mirrored. + */ + 'runtime_fraction'?: (_envoy_config_core_v3_RuntimeFractionalPercent | null); + /** + * Determines if the trace span should be sampled. Defaults to true. + */ + 'trace_sampled'?: (_google_protobuf_BoolValue | null); +} + +/** + * The router is capable of shadowing traffic from one cluster to another. The current + * implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to + * respond before returning the response from the primary cluster. All normal statistics are + * collected for the shadow cluster making this feature useful for testing. + * + * During shadowing, the host/authority header is altered such that ``-shadow`` is appended. This is + * useful for logging. For example, ``cluster1`` becomes ``cluster1-shadow``. + * + * .. note:: + * + * Shadowing will not be triggered if the primary cluster does not exist. + * + * .. note:: + * + * Shadowing doesn't support Http CONNECT and upgrades. + * [#next-free-field: 6] + */ +export interface _envoy_config_route_v3_RouteAction_RequestMirrorPolicy__Output { + /** + * Only one of ``cluster`` and ``cluster_header`` can be specified. + * [#next-major-version: Need to add back the validation rule: (validate.rules).string = {min_len: 1}] + * Specifies the cluster that requests will be mirrored to. The cluster must + * exist in the cluster manager configuration. + */ + 'cluster': (string); + /** + * Only one of ``cluster`` and ``cluster_header`` can be specified. + * Envoy will determine the cluster to route to by reading the value of the + * HTTP header named by cluster_header from the request headers. Only the first value in header is used, + * and no shadow request will happen if the value is not found in headers. Envoy will not wait for + * the shadow cluster to respond before returning the response from the primary cluster. + * + * .. attention:: + * + * Internally, Envoy always uses the HTTP/2 ``:authority`` header to represent the HTTP/1 + * ``Host`` header. Thus, if attempting to match on ``Host``, match on ``:authority`` instead. + * + * .. note:: + * + * If the header appears multiple times only the first value is used. + */ + 'cluster_header': (string); + /** + * If not specified, all requests to the target cluster will be mirrored. + * + * If specified, this field takes precedence over the ``runtime_key`` field and requests must also + * fall under the percentage of matches indicated by this field. + * + * For some fraction N/D, a random number in the range [0,D) is selected. If the + * number is <= the value of the numerator N, or if the key is not present, the default + * value, the request will be mirrored. + */ + 'runtime_fraction': (_envoy_config_core_v3_RuntimeFractionalPercent__Output | null); + /** + * Determines if the trace span should be sampled. Defaults to true. + */ + 'trace_sampled': (_google_protobuf_BoolValue__Output | null); +} + +/** + * Allows enabling and disabling upgrades on a per-route basis. 
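// Sketch of a "fire and forget" mirror policy as described above. The import path is
// assumed, and the RuntimeFractionalPercent / FractionalPercent / BoolValue object
// shapes are assumptions based on the corresponding generated protobuf types.
import type { _envoy_config_route_v3_RouteAction_RequestMirrorPolicy } from './RouteAction';

const mirrorPolicy: _envoy_config_route_v3_RouteAction_RequestMirrorPolicy = {
  cluster: 'cluster1', // shadow requests to cluster1; its authority header is logged as cluster1-shadow
  runtime_fraction: {
    runtime_key: 'routing.shadow.cluster1',
    default_value: { numerator: 10, denominator: 'HUNDRED' }, // mirror roughly 10% of requests
  },
  trace_sampled: { value: false },
};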
+ * This overrides any enabled/disabled upgrade filter chain specified in the + * HttpConnectionManager + * :ref:`upgrade_configs + * ` + * but does not affect any custom filter chain specified there. + */ +export interface _envoy_config_route_v3_RouteAction_UpgradeConfig { + /** + * The case-insensitive name of this upgrade, e.g. "websocket". + * For each upgrade type present in upgrade_configs, requests with + * Upgrade: [upgrade_type] will be proxied upstream. + */ + 'upgrade_type'?: (string); + /** + * Determines if upgrades are available on this route. Defaults to true. + */ + 'enabled'?: (_google_protobuf_BoolValue | null); + /** + * Configuration for sending data upstream as a raw data payload. This is used for + * CONNECT requests, when forwarding CONNECT payload as raw TCP. + * Note that CONNECT support is currently considered alpha in Envoy. + * [#comment: TODO(htuch): Replace the above comment with an alpha tag.] + */ + 'connect_config'?: (_envoy_config_route_v3_RouteAction_UpgradeConfig_ConnectConfig | null); +} + +/** + * Allows enabling and disabling upgrades on a per-route basis. + * This overrides any enabled/disabled upgrade filter chain specified in the + * HttpConnectionManager + * :ref:`upgrade_configs + * ` + * but does not affect any custom filter chain specified there. + */ +export interface _envoy_config_route_v3_RouteAction_UpgradeConfig__Output { + /** + * The case-insensitive name of this upgrade, e.g. "websocket". + * For each upgrade type present in upgrade_configs, requests with + * Upgrade: [upgrade_type] will be proxied upstream. + */ + 'upgrade_type': (string); + /** + * Determines if upgrades are available on this route. Defaults to true. + */ + 'enabled': (_google_protobuf_BoolValue__Output | null); + /** + * Configuration for sending data upstream as a raw data payload. This is used for + * CONNECT requests, when forwarding CONNECT payload as raw TCP. + * Note that CONNECT support is currently considered alpha in Envoy. + * [#comment: TODO(htuch): Replace the above comment with an alpha tag.] + */ + 'connect_config': (_envoy_config_route_v3_RouteAction_UpgradeConfig_ConnectConfig__Output | null); +} + +/** + * [#next-free-field: 42] + */ +export interface RouteAction { + /** + * Indicates the upstream cluster to which the request should be routed + * to. + */ + 'cluster'?: (string); + /** + * Envoy will determine the cluster to route to by reading the value of the + * HTTP header named by cluster_header from the request headers. If the + * header is not found or the referenced cluster does not exist, Envoy will + * return a 404 response. + * + * .. attention:: + * + * Internally, Envoy always uses the HTTP/2 ``:authority`` header to represent the HTTP/1 + * ``Host`` header. Thus, if attempting to match on ``Host``, match on ``:authority`` instead. + * + * .. note:: + * + * If the header appears multiple times only the first value is used. + */ + 'cluster_header'?: (string); + /** + * Multiple upstream clusters can be specified for a given route. The + * request is routed to one of the upstream clusters based on weights + * assigned to each cluster. See + * :ref:`traffic splitting ` + * for additional documentation. + */ + 'weighted_clusters'?: (_envoy_config_route_v3_WeightedCluster | null); + /** + * Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints + * in the upstream cluster with metadata matching what's set in this field will be considered + * for load balancing. 
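// Sketch of a per-route WebSocket upgrade config matching the description above
// (import path and BoolValue shape assumed).
import type { _envoy_config_route_v3_RouteAction_UpgradeConfig } from './RouteAction';

const websocketUpgrade: _envoy_config_route_v3_RouteAction_UpgradeConfig = {
  upgrade_type: 'websocket', // requests carrying "Upgrade: websocket" are proxied upstream
  enabled: { value: true },
};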
If using :ref:`weighted_clusters + * `, metadata will be merged, with values + * provided there taking precedence. The filter name should be specified as ``envoy.lb``. + */ + 'metadata_match'?: (_envoy_config_core_v3_Metadata | null); + /** + * Indicates that during forwarding, the matched prefix (or path) should be + * swapped with this value. This option allows application URLs to be rooted + * at a different path from those exposed at the reverse proxy layer. The router filter will + * place the original path before rewrite into the :ref:`x-envoy-original-path + * ` header. + * + * Only one of :ref:`regex_rewrite ` + * :ref:`path_rewrite_policy `, + * or :ref:`prefix_rewrite ` may be specified. + * + * .. attention:: + * + * Pay careful attention to the use of trailing slashes in the + * :ref:`route's match ` prefix value. + * Stripping a prefix from a path requires multiple Routes to handle all cases. For example, + * rewriting ``/prefix`` to ``/`` and ``/prefix/etc`` to ``/etc`` cannot be done in a single + * :ref:`Route `, as shown by the below config entries: + * + * .. code-block:: yaml + * + * - match: + * prefix: "/prefix/" + * route: + * prefix_rewrite: "/" + * - match: + * prefix: "/prefix" + * route: + * prefix_rewrite: "/" + * + * Having above entries in the config, requests to ``/prefix`` will be stripped to ``/``, while + * requests to ``/prefix/etc`` will be stripped to ``/etc``. + */ + 'prefix_rewrite'?: (string); + /** + * Indicates that during forwarding, the host header will be swapped with + * this value. Using this option will append the + * :ref:`config_http_conn_man_headers_x-forwarded-host` header if + * :ref:`append_x_forwarded_host ` + * is set. + */ + 'host_rewrite_literal'?: (string); + /** + * Indicates that during forwarding, the host header will be swapped with + * the hostname of the upstream host chosen by the cluster manager. This + * option is applicable only when the destination cluster for a route is of + * type ``strict_dns`` or ``logical_dns``. Setting this to true with other cluster types + * has no effect. Using this option will append the + * :ref:`config_http_conn_man_headers_x-forwarded-host` header if + * :ref:`append_x_forwarded_host ` + * is set. + */ + 'auto_host_rewrite'?: (_google_protobuf_BoolValue | null); + /** + * Specifies the upstream timeout for the route. If not specified, the default is 15s. This + * spans between the point at which the entire downstream request (i.e. end-of-stream) has been + * processed and when the upstream response has been completely processed. A value of 0 will + * disable the route's timeout. + * + * .. note:: + * + * This timeout includes all retries. See also + * :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, + * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the + * :ref:`retry overview `. + */ + 'timeout'?: (_google_protobuf_Duration | null); + /** + * Indicates that the route has a retry policy. Note that if this is set, + * it'll take precedence over the virtual host level retry policy entirely + * (e.g.: policies are not merged, most internal one becomes the enforced policy). + */ + 'retry_policy'?: (_envoy_config_route_v3_RetryPolicy | null); + /** + * Optionally specifies the :ref:`routing priority `. + */ + 'priority'?: (_envoy_config_core_v3_RoutingPriority); + /** + * Specifies a set of rate limit configurations that could be applied to the + * route. 
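// Sketch of the prefix-stripping pattern described above, expressed with the generated
// RouteAction type (import path assumed). The two prefix matches ("/prefix/" and
// "/prefix") live on the enclosing Routes' RouteMatch, which is not shown here.
import type { RouteAction } from './RouteAction';

const stripPrefix: RouteAction = {
  cluster: 'backend',
  prefix_rewrite: '/', // reused by one Route matching "/prefix/" and another matching "/prefix"
};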
+ */ + 'rate_limits'?: (_envoy_config_route_v3_RateLimit)[]; + /** + * Specifies if the rate limit filter should include the virtual host rate + * limits. By default, if the route configured rate limits, the virtual host + * :ref:`rate_limits ` are not applied to the + * request. + * + * This field is deprecated. Please use :ref:`vh_rate_limits ` + * @deprecated + */ + 'include_vh_rate_limits'?: (_google_protobuf_BoolValue | null); + /** + * Specifies a list of hash policies to use for ring hash load balancing. Each + * hash policy is evaluated individually and the combined result is used to + * route the request. The method of combination is deterministic such that + * identical lists of hash policies will produce the same hash. Since a hash + * policy examines specific parts of a request, it can fail to produce a hash + * (i.e. if the hashed header is not present). If (and only if) all configured + * hash policies fail to generate a hash, no hash will be produced for + * the route. In this case, the behavior is the same as if no hash policies + * were specified (i.e. the ring hash load balancer will choose a random + * backend). If a hash policy has the "terminal" attribute set to true, and + * there is already a hash generated, the hash is returned immediately, + * ignoring the rest of the hash policy list. + */ + 'hash_policy'?: (_envoy_config_route_v3_RouteAction_HashPolicy)[]; + /** + * Indicates that the route has a CORS policy. This field is ignored if related cors policy is + * found in the :ref:`Route.typed_per_filter_config` or + * :ref:`WeightedCluster.ClusterWeight.typed_per_filter_config`. + * + * .. attention:: + * + * This option has been deprecated. Please use + * :ref:`Route.typed_per_filter_config` or + * :ref:`WeightedCluster.ClusterWeight.typed_per_filter_config` + * to configure the CORS HTTP filter. + * @deprecated + */ + 'cors'?: (_envoy_config_route_v3_CorsPolicy | null); + /** + * The HTTP status code to use when configured cluster is not found. + * The default response code is 503 Service Unavailable. + */ + 'cluster_not_found_response_code'?: (_envoy_config_route_v3_RouteAction_ClusterNotFoundResponseCode); + /** + * Deprecated by :ref:`grpc_timeout_header_max ` + * If present, and the request is a gRPC request, use the + * `grpc-timeout header `_, + * or its default value (infinity) instead of + * :ref:`timeout `, but limit the applied timeout + * to the maximum value specified here. If configured as 0, the maximum allowed timeout for + * gRPC requests is infinity. If not configured at all, the ``grpc-timeout`` header is not used + * and gRPC requests time out like any other requests using + * :ref:`timeout ` or its default. + * This can be used to prevent unexpected upstream request timeouts due to potentially long + * time gaps between gRPC request and response in gRPC streaming mode. + * + * .. note:: + * + * If a timeout is specified using :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, it takes + * precedence over `grpc-timeout header `_, when + * both are present. See also + * :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, + * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the + * :ref:`retry overview `. + * @deprecated + */ + 'max_grpc_timeout'?: (_google_protobuf_Duration | null); + /** + * Specifies the idle timeout for the route. If not specified, there is no per-route idle timeout, + * although the connection manager wide :ref:`stream_idle_timeout + * ` + * will still apply. 
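// Sketch of a RouteAction that picks the cluster from a request header and responds
// 404 instead of the default 503 when that cluster is not found. The 'NOT_FOUND'
// literal and the Duration shape are assumptions based on the referenced generated
// types; the import path is illustrative.
import type { RouteAction } from './RouteAction';

const headerRoutedAction: RouteAction = {
  cluster_header: 'x-target-cluster',
  cluster_not_found_response_code: 'NOT_FOUND',
  idle_timeout: { seconds: 300 }, // per-route idle timeout; 0 disables it entirely
};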
A value of 0 will completely disable the route's idle timeout, even if a + * connection manager stream idle timeout is configured. + * + * The idle timeout is distinct to :ref:`timeout + * `, which provides an upper bound + * on the upstream response time; :ref:`idle_timeout + * ` instead bounds the amount + * of time the request's stream may be idle. + * + * After header decoding, the idle timeout will apply on downstream and + * upstream request events. Each time an encode/decode event for headers or + * data is processed for the stream, the timer will be reset. If the timeout + * fires, the stream is terminated with a 408 Request Timeout error code if no + * upstream response header has been received, otherwise a stream reset + * occurs. + * + * If the :ref:`overload action ` "envoy.overload_actions.reduce_timeouts" + * is configured, this timeout is scaled according to the value for + * :ref:`HTTP_DOWNSTREAM_STREAM_IDLE `. + */ + 'idle_timeout'?: (_google_protobuf_Duration | null); + 'upgrade_configs'?: (_envoy_config_route_v3_RouteAction_UpgradeConfig)[]; + /** + * @deprecated + */ + 'internal_redirect_action'?: (_envoy_config_route_v3_RouteAction_InternalRedirectAction); + /** + * Indicates that the route has a hedge policy. Note that if this is set, + * it'll take precedence over the virtual host level hedge policy entirely + * (e.g.: policies are not merged, most internal one becomes the enforced policy). + */ + 'hedge_policy'?: (_envoy_config_route_v3_HedgePolicy | null); + /** + * Deprecated by :ref:`grpc_timeout_header_offset `. + * If present, Envoy will adjust the timeout provided by the ``grpc-timeout`` header by subtracting + * the provided duration from the header. This is useful in allowing Envoy to set its global + * timeout to be less than that of the deadline imposed by the calling client, which makes it more + * likely that Envoy will handle the timeout instead of having the call canceled by the client. + * The offset will only be applied if the provided grpc_timeout is greater than the offset. This + * ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning + * infinity). + * @deprecated + */ + 'grpc_timeout_offset'?: (_google_protobuf_Duration | null); + /** + * Indicates that during forwarding, the host header will be swapped with the content of given + * downstream or :ref:`custom ` header. + * If header value is empty, host header is left intact. Using this option will append the + * :ref:`config_http_conn_man_headers_x-forwarded-host` header if + * :ref:`append_x_forwarded_host ` + * is set. + * + * .. attention:: + * + * Pay attention to the potential security implications of using this option. Provided header + * must come from trusted source. + * + * .. note:: + * + * If the header appears multiple times only the first value is used. + */ + 'host_rewrite_header'?: (string); + /** + * Specify a set of route request mirroring policies. + * It takes precedence over the virtual host and route config mirror policy entirely. + * That is, policies are not merged, the most specific non-empty one becomes the mirror policies. 
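// Sketch: route-level mirror policies take precedence over virtual host and route
// config level policies, per the note above (import path assumed; policies are not merged).
import type { RouteAction } from './RouteAction';

const mirroredRoute: RouteAction = {
  cluster: 'primary',
  request_mirror_policies: [
    { cluster: 'primary-shadow-target' }, // with no runtime_fraction, every request is mirrored
  ],
  host_rewrite_header: 'x-upstream-host', // rewrite the host header from a trusted request header
};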
+ */ + 'request_mirror_policies'?: (_envoy_config_route_v3_RouteAction_RequestMirrorPolicy)[]; + /** + * An internal redirect is handled, iff the number of previous internal redirects that a + * downstream request has encountered is lower than this value, and + * :ref:`internal_redirect_action ` + * is set to :ref:`HANDLE_INTERNAL_REDIRECT + * ` + * In the case where a downstream request is bounced among multiple routes by internal redirect, + * the first route that hits this threshold, or has + * :ref:`internal_redirect_action ` + * set to + * :ref:`PASS_THROUGH_INTERNAL_REDIRECT + * ` + * will pass the redirect back to downstream. + * + * If not specified, at most one redirect will be followed. + * @deprecated + */ + 'max_internal_redirects'?: (_google_protobuf_UInt32Value | null); + /** + * Indicates that during forwarding, portions of the path that match the + * pattern should be rewritten, even allowing the substitution of capture + * groups from the pattern into the new path as specified by the rewrite + * substitution string. This is useful to allow application paths to be + * rewritten in a way that is aware of segments with variable content like + * identifiers. The router filter will place the original path as it was + * before the rewrite into the :ref:`x-envoy-original-path + * ` header. + * + * Only one of :ref:`regex_rewrite `, + * :ref:`prefix_rewrite `, or + * :ref:`path_rewrite_policy `] + * may be specified. + * + * Examples using Google's `RE2 `_ engine: + * + * * The path pattern ``^/service/([^/]+)(/.*)$`` paired with a substitution + * string of ``\2/instance/\1`` would transform ``/service/foo/v1/api`` + * into ``/v1/api/instance/foo``. + * + * * The pattern ``one`` paired with a substitution string of ``two`` would + * transform ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/two/zzz``. + * + * * The pattern ``^(.*?)one(.*)$`` paired with a substitution string of + * ``\1two\2`` would replace only the first occurrence of ``one``, + * transforming path ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/one/zzz``. + * + * * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/`` + * would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to + * ``/aaa/yyy/bbb``. + */ + 'regex_rewrite'?: (_envoy_type_matcher_v3_RegexMatchAndSubstitute | null); + /** + * [#not-implemented-hide:] + * Specifies the configuration for retry policy extension. Note that if this is set, it'll take + * precedence over the virtual host level retry policy entirely (e.g.: policies are not merged, + * most internal one becomes the enforced policy). :ref:`Retry policy ` + * should not be set if this field is used. + */ + 'retry_policy_typed_config'?: (_google_protobuf_Any | null); + /** + * If present, Envoy will try to follow an upstream redirect response instead of proxying the + * response back to the downstream. An upstream redirect response is defined + * by :ref:`redirect_response_codes + * `. + */ + 'internal_redirect_policy'?: (_envoy_config_route_v3_InternalRedirectPolicy | null); + /** + * Indicates that during forwarding, the host header will be swapped with + * the result of the regex substitution executed on path value with query and fragment removed. + * This is useful for transitioning variable content between path segment and subdomain. + * Using this option will append the + * :ref:`config_http_conn_man_headers_x-forwarded-host` header if + * :ref:`append_x_forwarded_host ` + * is set. + * + * For example with the following config: + * + * .. 
code-block:: yaml + * + * host_rewrite_path_regex: + * pattern: + * google_re2: {} + * regex: "^/(.+)/.+$" + * substitution: \1 + * + * Would rewrite the host header to ``envoyproxy.io`` given the path ``/envoyproxy.io/some/path``. + */ + 'host_rewrite_path_regex'?: (_envoy_type_matcher_v3_RegexMatchAndSubstitute | null); + /** + * Specifies the maximum stream duration for this route. + */ + 'max_stream_duration'?: (_envoy_config_route_v3_RouteAction_MaxStreamDuration | null); + /** + * Name of the cluster specifier plugin to use to determine the cluster for requests on this route. + * The cluster specifier plugin name must be defined in the associated + * :ref:`cluster specifier plugins ` + * in the :ref:`name ` field. + */ + 'cluster_specifier_plugin'?: (string); + /** + * If set, then a host rewrite action (one of + * :ref:`host_rewrite_literal `, + * :ref:`auto_host_rewrite `, + * :ref:`host_rewrite_header `, or + * :ref:`host_rewrite_path_regex `) + * causes the original value of the host header, if any, to be appended to the + * :ref:`config_http_conn_man_headers_x-forwarded-host` HTTP header if it is different to the last value appended. + * This can be disabled by setting the runtime guard `envoy_reloadable_features_append_xfh_idempotent` to false. + */ + 'append_x_forwarded_host'?: (boolean); + /** + * Custom cluster specifier plugin configuration to use to determine the cluster for requests + * on this route. + */ + 'inline_cluster_specifier_plugin'?: (_envoy_config_route_v3_ClusterSpecifierPlugin | null); + /** + * Specifies how to send request over TLS early data. + * If absent, allows `safe HTTP requests `_ to be sent on early data. + * [#extension-category: envoy.route.early_data_policy] + */ + 'early_data_policy'?: (_envoy_config_core_v3_TypedExtensionConfig | null); + /** + * [#extension-category: envoy.path.rewrite] + */ + 'path_rewrite_policy'?: (_envoy_config_core_v3_TypedExtensionConfig | null); + 'cluster_specifier'?: "cluster"|"cluster_header"|"weighted_clusters"|"cluster_specifier_plugin"|"inline_cluster_specifier_plugin"; + 'host_rewrite_specifier'?: "host_rewrite_literal"|"auto_host_rewrite"|"host_rewrite_header"|"host_rewrite_path_regex"; +} + +/** + * [#next-free-field: 42] + */ +export interface RouteAction__Output { + /** + * Indicates the upstream cluster to which the request should be routed + * to. + */ + 'cluster'?: (string); + /** + * Envoy will determine the cluster to route to by reading the value of the + * HTTP header named by cluster_header from the request headers. If the + * header is not found or the referenced cluster does not exist, Envoy will + * return a 404 response. + * + * .. attention:: + * + * Internally, Envoy always uses the HTTP/2 ``:authority`` header to represent the HTTP/1 + * ``Host`` header. Thus, if attempting to match on ``Host``, match on ``:authority`` instead. + * + * .. note:: + * + * If the header appears multiple times only the first value is used. + */ + 'cluster_header'?: (string); + /** + * Multiple upstream clusters can be specified for a given route. The + * request is routed to one of the upstream clusters based on weights + * assigned to each cluster. See + * :ref:`traffic splitting ` + * for additional documentation. + */ + 'weighted_clusters'?: (_envoy_config_route_v3_WeightedCluster__Output | null); + /** + * Optional endpoint metadata match criteria used by the subset load balancer. 
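// Sketch of the first regex_rewrite example documented above. The import path and the
// RegexMatchAndSubstitute / RegexMatcher shapes are assumptions based on the referenced
// generated types.
import type { RouteAction } from './RouteAction';

const regexRewriteAction: RouteAction = {
  cluster: 'backend',
  regex_rewrite: {
    pattern: { regex: '^/service/([^/]+)(/.*)$' },
    substitution: '\\2/instance/\\1', // /service/foo/v1/api -> /v1/api/instance/foo
  },
};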
Only endpoints + * in the upstream cluster with metadata matching what's set in this field will be considered + * for load balancing. If using :ref:`weighted_clusters + * `, metadata will be merged, with values + * provided there taking precedence. The filter name should be specified as ``envoy.lb``. + */ + 'metadata_match': (_envoy_config_core_v3_Metadata__Output | null); + /** + * Indicates that during forwarding, the matched prefix (or path) should be + * swapped with this value. This option allows application URLs to be rooted + * at a different path from those exposed at the reverse proxy layer. The router filter will + * place the original path before rewrite into the :ref:`x-envoy-original-path + * ` header. + * + * Only one of :ref:`regex_rewrite ` + * :ref:`path_rewrite_policy `, + * or :ref:`prefix_rewrite ` may be specified. + * + * .. attention:: + * + * Pay careful attention to the use of trailing slashes in the + * :ref:`route's match ` prefix value. + * Stripping a prefix from a path requires multiple Routes to handle all cases. For example, + * rewriting ``/prefix`` to ``/`` and ``/prefix/etc`` to ``/etc`` cannot be done in a single + * :ref:`Route `, as shown by the below config entries: + * + * .. code-block:: yaml + * + * - match: + * prefix: "/prefix/" + * route: + * prefix_rewrite: "/" + * - match: + * prefix: "/prefix" + * route: + * prefix_rewrite: "/" + * + * Having above entries in the config, requests to ``/prefix`` will be stripped to ``/``, while + * requests to ``/prefix/etc`` will be stripped to ``/etc``. + */ + 'prefix_rewrite': (string); + /** + * Indicates that during forwarding, the host header will be swapped with + * this value. Using this option will append the + * :ref:`config_http_conn_man_headers_x-forwarded-host` header if + * :ref:`append_x_forwarded_host ` + * is set. + */ + 'host_rewrite_literal'?: (string); + /** + * Indicates that during forwarding, the host header will be swapped with + * the hostname of the upstream host chosen by the cluster manager. This + * option is applicable only when the destination cluster for a route is of + * type ``strict_dns`` or ``logical_dns``. Setting this to true with other cluster types + * has no effect. Using this option will append the + * :ref:`config_http_conn_man_headers_x-forwarded-host` header if + * :ref:`append_x_forwarded_host ` + * is set. + */ + 'auto_host_rewrite'?: (_google_protobuf_BoolValue__Output | null); + /** + * Specifies the upstream timeout for the route. If not specified, the default is 15s. This + * spans between the point at which the entire downstream request (i.e. end-of-stream) has been + * processed and when the upstream response has been completely processed. A value of 0 will + * disable the route's timeout. + * + * .. note:: + * + * This timeout includes all retries. See also + * :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, + * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the + * :ref:`retry overview `. + */ + 'timeout': (_google_protobuf_Duration__Output | null); + /** + * Indicates that the route has a retry policy. Note that if this is set, + * it'll take precedence over the virtual host level retry policy entirely + * (e.g.: policies are not merged, most internal one becomes the enforced policy). + */ + 'retry_policy': (_envoy_config_route_v3_RetryPolicy__Output | null); + /** + * Optionally specifies the :ref:`routing priority `. 
+ */ + 'priority': (_envoy_config_core_v3_RoutingPriority__Output); + /** + * Specifies a set of rate limit configurations that could be applied to the + * route. + */ + 'rate_limits': (_envoy_config_route_v3_RateLimit__Output)[]; + /** + * Specifies if the rate limit filter should include the virtual host rate + * limits. By default, if the route configured rate limits, the virtual host + * :ref:`rate_limits ` are not applied to the + * request. + * + * This field is deprecated. Please use :ref:`vh_rate_limits ` + * @deprecated + */ + 'include_vh_rate_limits': (_google_protobuf_BoolValue__Output | null); + /** + * Specifies a list of hash policies to use for ring hash load balancing. Each + * hash policy is evaluated individually and the combined result is used to + * route the request. The method of combination is deterministic such that + * identical lists of hash policies will produce the same hash. Since a hash + * policy examines specific parts of a request, it can fail to produce a hash + * (i.e. if the hashed header is not present). If (and only if) all configured + * hash policies fail to generate a hash, no hash will be produced for + * the route. In this case, the behavior is the same as if no hash policies + * were specified (i.e. the ring hash load balancer will choose a random + * backend). If a hash policy has the "terminal" attribute set to true, and + * there is already a hash generated, the hash is returned immediately, + * ignoring the rest of the hash policy list. + */ + 'hash_policy': (_envoy_config_route_v3_RouteAction_HashPolicy__Output)[]; + /** + * Indicates that the route has a CORS policy. This field is ignored if related cors policy is + * found in the :ref:`Route.typed_per_filter_config` or + * :ref:`WeightedCluster.ClusterWeight.typed_per_filter_config`. + * + * .. attention:: + * + * This option has been deprecated. Please use + * :ref:`Route.typed_per_filter_config` or + * :ref:`WeightedCluster.ClusterWeight.typed_per_filter_config` + * to configure the CORS HTTP filter. + * @deprecated + */ + 'cors': (_envoy_config_route_v3_CorsPolicy__Output | null); + /** + * The HTTP status code to use when configured cluster is not found. + * The default response code is 503 Service Unavailable. + */ + 'cluster_not_found_response_code': (_envoy_config_route_v3_RouteAction_ClusterNotFoundResponseCode__Output); + /** + * Deprecated by :ref:`grpc_timeout_header_max ` + * If present, and the request is a gRPC request, use the + * `grpc-timeout header `_, + * or its default value (infinity) instead of + * :ref:`timeout `, but limit the applied timeout + * to the maximum value specified here. If configured as 0, the maximum allowed timeout for + * gRPC requests is infinity. If not configured at all, the ``grpc-timeout`` header is not used + * and gRPC requests time out like any other requests using + * :ref:`timeout ` or its default. + * This can be used to prevent unexpected upstream request timeouts due to potentially long + * time gaps between gRPC request and response in gRPC streaming mode. + * + * .. note:: + * + * If a timeout is specified using :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, it takes + * precedence over `grpc-timeout header `_, when + * both are present. See also + * :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, + * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the + * :ref:`retry overview `. 
+ * @deprecated + */ + 'max_grpc_timeout': (_google_protobuf_Duration__Output | null); + /** + * Specifies the idle timeout for the route. If not specified, there is no per-route idle timeout, + * although the connection manager wide :ref:`stream_idle_timeout + * ` + * will still apply. A value of 0 will completely disable the route's idle timeout, even if a + * connection manager stream idle timeout is configured. + * + * The idle timeout is distinct to :ref:`timeout + * `, which provides an upper bound + * on the upstream response time; :ref:`idle_timeout + * ` instead bounds the amount + * of time the request's stream may be idle. + * + * After header decoding, the idle timeout will apply on downstream and + * upstream request events. Each time an encode/decode event for headers or + * data is processed for the stream, the timer will be reset. If the timeout + * fires, the stream is terminated with a 408 Request Timeout error code if no + * upstream response header has been received, otherwise a stream reset + * occurs. + * + * If the :ref:`overload action ` "envoy.overload_actions.reduce_timeouts" + * is configured, this timeout is scaled according to the value for + * :ref:`HTTP_DOWNSTREAM_STREAM_IDLE `. + */ + 'idle_timeout': (_google_protobuf_Duration__Output | null); + 'upgrade_configs': (_envoy_config_route_v3_RouteAction_UpgradeConfig__Output)[]; + /** + * @deprecated + */ + 'internal_redirect_action': (_envoy_config_route_v3_RouteAction_InternalRedirectAction__Output); + /** + * Indicates that the route has a hedge policy. Note that if this is set, + * it'll take precedence over the virtual host level hedge policy entirely + * (e.g.: policies are not merged, most internal one becomes the enforced policy). + */ + 'hedge_policy': (_envoy_config_route_v3_HedgePolicy__Output | null); + /** + * Deprecated by :ref:`grpc_timeout_header_offset `. + * If present, Envoy will adjust the timeout provided by the ``grpc-timeout`` header by subtracting + * the provided duration from the header. This is useful in allowing Envoy to set its global + * timeout to be less than that of the deadline imposed by the calling client, which makes it more + * likely that Envoy will handle the timeout instead of having the call canceled by the client. + * The offset will only be applied if the provided grpc_timeout is greater than the offset. This + * ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning + * infinity). + * @deprecated + */ + 'grpc_timeout_offset': (_google_protobuf_Duration__Output | null); + /** + * Indicates that during forwarding, the host header will be swapped with the content of given + * downstream or :ref:`custom ` header. + * If header value is empty, host header is left intact. Using this option will append the + * :ref:`config_http_conn_man_headers_x-forwarded-host` header if + * :ref:`append_x_forwarded_host ` + * is set. + * + * .. attention:: + * + * Pay attention to the potential security implications of using this option. Provided header + * must come from trusted source. + * + * .. note:: + * + * If the header appears multiple times only the first value is used. + */ + 'host_rewrite_header'?: (string); + /** + * Specify a set of route request mirroring policies. + * It takes precedence over the virtual host and route config mirror policy entirely. + * That is, policies are not merged, the most specific non-empty one becomes the mirror policies. 
+ */ + 'request_mirror_policies': (_envoy_config_route_v3_RouteAction_RequestMirrorPolicy__Output)[]; + /** + * An internal redirect is handled, iff the number of previous internal redirects that a + * downstream request has encountered is lower than this value, and + * :ref:`internal_redirect_action ` + * is set to :ref:`HANDLE_INTERNAL_REDIRECT + * ` + * In the case where a downstream request is bounced among multiple routes by internal redirect, + * the first route that hits this threshold, or has + * :ref:`internal_redirect_action ` + * set to + * :ref:`PASS_THROUGH_INTERNAL_REDIRECT + * ` + * will pass the redirect back to downstream. + * + * If not specified, at most one redirect will be followed. + * @deprecated + */ + 'max_internal_redirects': (_google_protobuf_UInt32Value__Output | null); + /** + * Indicates that during forwarding, portions of the path that match the + * pattern should be rewritten, even allowing the substitution of capture + * groups from the pattern into the new path as specified by the rewrite + * substitution string. This is useful to allow application paths to be + * rewritten in a way that is aware of segments with variable content like + * identifiers. The router filter will place the original path as it was + * before the rewrite into the :ref:`x-envoy-original-path + * ` header. + * + * Only one of :ref:`regex_rewrite `, + * :ref:`prefix_rewrite `, or + * :ref:`path_rewrite_policy `] + * may be specified. + * + * Examples using Google's `RE2 `_ engine: + * + * * The path pattern ``^/service/([^/]+)(/.*)$`` paired with a substitution + * string of ``\2/instance/\1`` would transform ``/service/foo/v1/api`` + * into ``/v1/api/instance/foo``. + * + * * The pattern ``one`` paired with a substitution string of ``two`` would + * transform ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/two/zzz``. + * + * * The pattern ``^(.*?)one(.*)$`` paired with a substitution string of + * ``\1two\2`` would replace only the first occurrence of ``one``, + * transforming path ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/one/zzz``. + * + * * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/`` + * would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to + * ``/aaa/yyy/bbb``. + */ + 'regex_rewrite': (_envoy_type_matcher_v3_RegexMatchAndSubstitute__Output | null); + /** + * [#not-implemented-hide:] + * Specifies the configuration for retry policy extension. Note that if this is set, it'll take + * precedence over the virtual host level retry policy entirely (e.g.: policies are not merged, + * most internal one becomes the enforced policy). :ref:`Retry policy ` + * should not be set if this field is used. + */ + 'retry_policy_typed_config': (_google_protobuf_Any__Output | null); + /** + * If present, Envoy will try to follow an upstream redirect response instead of proxying the + * response back to the downstream. An upstream redirect response is defined + * by :ref:`redirect_response_codes + * `. + */ + 'internal_redirect_policy': (_envoy_config_route_v3_InternalRedirectPolicy__Output | null); + /** + * Indicates that during forwarding, the host header will be swapped with + * the result of the regex substitution executed on path value with query and fragment removed. + * This is useful for transitioning variable content between path segment and subdomain. + * Using this option will append the + * :ref:`config_http_conn_man_headers_x-forwarded-host` header if + * :ref:`append_x_forwarded_host ` + * is set. 
+ * + * For example with the following config: + * + * .. code-block:: yaml + * + * host_rewrite_path_regex: + * pattern: + * google_re2: {} + * regex: "^/(.+)/.+$" + * substitution: \1 + * + * Would rewrite the host header to ``envoyproxy.io`` given the path ``/envoyproxy.io/some/path``. + */ + 'host_rewrite_path_regex'?: (_envoy_type_matcher_v3_RegexMatchAndSubstitute__Output | null); + /** + * Specifies the maximum stream duration for this route. + */ + 'max_stream_duration': (_envoy_config_route_v3_RouteAction_MaxStreamDuration__Output | null); + /** + * Name of the cluster specifier plugin to use to determine the cluster for requests on this route. + * The cluster specifier plugin name must be defined in the associated + * :ref:`cluster specifier plugins ` + * in the :ref:`name ` field. + */ + 'cluster_specifier_plugin'?: (string); + /** + * If set, then a host rewrite action (one of + * :ref:`host_rewrite_literal `, + * :ref:`auto_host_rewrite `, + * :ref:`host_rewrite_header `, or + * :ref:`host_rewrite_path_regex `) + * causes the original value of the host header, if any, to be appended to the + * :ref:`config_http_conn_man_headers_x-forwarded-host` HTTP header if it is different to the last value appended. + * This can be disabled by setting the runtime guard `envoy_reloadable_features_append_xfh_idempotent` to false. + */ + 'append_x_forwarded_host': (boolean); + /** + * Custom cluster specifier plugin configuration to use to determine the cluster for requests + * on this route. + */ + 'inline_cluster_specifier_plugin'?: (_envoy_config_route_v3_ClusterSpecifierPlugin__Output | null); + /** + * Specifies how to send request over TLS early data. + * If absent, allows `safe HTTP requests `_ to be sent on early data. + * [#extension-category: envoy.route.early_data_policy] + */ + 'early_data_policy': (_envoy_config_core_v3_TypedExtensionConfig__Output | null); + /** + * [#extension-category: envoy.path.rewrite] + */ + 'path_rewrite_policy': (_envoy_config_core_v3_TypedExtensionConfig__Output | null); + 'cluster_specifier': "cluster"|"cluster_header"|"weighted_clusters"|"cluster_specifier_plugin"|"inline_cluster_specifier_plugin"; + 'host_rewrite_specifier': "host_rewrite_literal"|"auto_host_rewrite"|"host_rewrite_header"|"host_rewrite_path_regex"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/route/v3/RouteConfiguration.ts b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/RouteConfiguration.ts new file mode 100644 index 000000000..1eddc6528 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/RouteConfiguration.ts @@ -0,0 +1,292 @@ +// Original file: deps/envoy-api/envoy/config/route/v3/route.proto + +import type { VirtualHost as _envoy_config_route_v3_VirtualHost, VirtualHost__Output as _envoy_config_route_v3_VirtualHost__Output } from '../../../../envoy/config/route/v3/VirtualHost'; +import type { HeaderValueOption as _envoy_config_core_v3_HeaderValueOption, HeaderValueOption__Output as _envoy_config_core_v3_HeaderValueOption__Output } from '../../../../envoy/config/core/v3/HeaderValueOption'; +import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../../google/protobuf/BoolValue'; +import type { Vhds as _envoy_config_route_v3_Vhds, Vhds__Output as _envoy_config_route_v3_Vhds__Output } from '../../../../envoy/config/route/v3/Vhds'; +import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from 
'../../../../google/protobuf/UInt32Value'; +import type { ClusterSpecifierPlugin as _envoy_config_route_v3_ClusterSpecifierPlugin, ClusterSpecifierPlugin__Output as _envoy_config_route_v3_ClusterSpecifierPlugin__Output } from '../../../../envoy/config/route/v3/ClusterSpecifierPlugin'; +import type { _envoy_config_route_v3_RouteAction_RequestMirrorPolicy, _envoy_config_route_v3_RouteAction_RequestMirrorPolicy__Output } from '../../../../envoy/config/route/v3/RouteAction'; +import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; + +/** + * [#next-free-field: 17] + */ +export interface RouteConfiguration { + /** + * The name of the route configuration. For example, it might match + * :ref:`route_config_name + * ` in + * :ref:`envoy_v3_api_msg_extensions.filters.network.http_connection_manager.v3.Rds`. + */ + 'name'?: (string); + /** + * An array of virtual hosts that make up the route table. + */ + 'virtual_hosts'?: (_envoy_config_route_v3_VirtualHost)[]; + /** + * Optionally specifies a list of HTTP headers that the connection manager + * will consider to be internal only. If they are found on external requests they will be cleaned + * prior to filter invocation. See :ref:`config_http_conn_man_headers_x-envoy-internal` for more + * information. + */ + 'internal_only_headers'?: (string)[]; + /** + * Specifies a list of HTTP headers that should be added to each response that + * the connection manager encodes. Headers specified at this level are applied + * after headers from any enclosed :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` or + * :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. For more information, including details on + * header value syntax, see the documentation on :ref:`custom request headers + * `. + */ + 'response_headers_to_add'?: (_envoy_config_core_v3_HeaderValueOption)[]; + /** + * Specifies a list of HTTP headers that should be removed from each response + * that the connection manager encodes. + */ + 'response_headers_to_remove'?: (string)[]; + /** + * Specifies a list of HTTP headers that should be added to each request + * routed by the HTTP connection manager. Headers specified at this level are + * applied after headers from any enclosed :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` or + * :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. For more information, including details on + * header value syntax, see the documentation on :ref:`custom request headers + * `. + */ + 'request_headers_to_add'?: (_envoy_config_core_v3_HeaderValueOption)[]; + /** + * An optional boolean that specifies whether the clusters that the route + * table refers to will be validated by the cluster manager. If set to true + * and a route refers to a non-existent cluster, the route table will not + * load. If set to false and a route refers to a non-existent cluster, the + * route table will load and the router filter will return a 404 if the route + * is selected at runtime. This setting defaults to true if the route table + * is statically defined via the :ref:`route_config + * ` + * option. This setting default to false if the route table is loaded dynamically via the + * :ref:`rds + * ` + * option. Users may wish to override the default behavior in certain cases (for example when + * using CDS with a static route table). 
+ */ + 'validate_clusters'?: (_google_protobuf_BoolValue | null); + /** + * Specifies a list of HTTP headers that should be removed from each request + * routed by the HTTP connection manager. + */ + 'request_headers_to_remove'?: (string)[]; + /** + * An array of virtual hosts will be dynamically loaded via the VHDS API. + * Both ``virtual_hosts`` and ``vhds`` fields will be used when present. ``virtual_hosts`` can be used + * for a base routing table or for infrequently changing virtual hosts. ``vhds`` is used for + * on-demand discovery of virtual hosts. The contents of these two fields will be merged to + * generate a routing table for a given RouteConfiguration, with ``vhds`` derived configuration + * taking precedence. + */ + 'vhds'?: (_envoy_config_route_v3_Vhds | null); + /** + * By default, headers that should be added/removed are evaluated from most to least specific: + * + * * route level + * * virtual host level + * * connection manager level + * + * To allow setting overrides at the route or virtual host level, this order can be reversed + * by setting this option to true. Defaults to false. + */ + 'most_specific_header_mutations_wins'?: (boolean); + /** + * The maximum bytes of the response :ref:`direct response body + * ` size. If not specified the default + * is 4096. + * + * .. warning:: + * + * Envoy currently holds the content of :ref:`direct response body + * ` in memory. Be careful setting + * this to be larger than the default 4KB, since the allocated memory for direct response body + * is not subject to data plane buffering controls. + */ + 'max_direct_response_body_size_bytes'?: (_google_protobuf_UInt32Value | null); + /** + * A list of plugins and their configurations which may be used by a + * :ref:`cluster specifier plugin name ` + * within the route. All ``extension.name`` fields in this list must be unique. + */ + 'cluster_specifier_plugins'?: (_envoy_config_route_v3_ClusterSpecifierPlugin)[]; + /** + * Specify a set of default request mirroring policies which apply to all routes under its virtual hosts. + * Note that policies are not merged, the most specific non-empty one becomes the mirror policies. + */ + 'request_mirror_policies'?: (_envoy_config_route_v3_RouteAction_RequestMirrorPolicy)[]; + /** + * By default, port in :authority header (if any) is used in host matching. + * With this option enabled, Envoy will ignore the port number in the :authority header (if any) when picking VirtualHost. + * NOTE: this option will not strip the port number (if any) contained in route config + * :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`.domains field. + */ + 'ignore_port_in_host_matching'?: (boolean); + /** + * Ignore path-parameters in path-matching. + * Before RFC3986, URI were like(RFC1808): :///;?# + * Envoy by default takes ":path" as ";". + * For users who want to only match path on the "" portion, this option should be true. + */ + 'ignore_path_parameters_in_path_matching'?: (boolean); + /** + * The typed_per_filter_config field can be used to provide RouteConfiguration level per filter config. + * The key should match the :ref:`filter config name + * `. + * The canonical filter name (e.g., ``envoy.filters.http.buffer`` for the HTTP buffer filter) can also + * be used for the backwards compatibility. If there is no entry referred by the filter config name, the + * entry referred by the canonical filter name will be provided to the filters as fallback. 
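// Sketch of a minimal RouteConfiguration using the fields documented above. The import
// path and the BoolValue / UInt32Value shapes are assumptions; virtual_hosts is left
// empty so that no VirtualHost shape needs to be assumed here.
import type { RouteConfiguration } from './RouteConfiguration';

const routeConfig: RouteConfiguration = {
  name: 'local_route',
  virtual_hosts: [], // normally populated statically, or delivered on demand via vhds
  validate_clusters: { value: false }, // explicit version of the dynamic (RDS) default described above
  most_specific_header_mutations_wins: true,
  ignore_port_in_host_matching: true,
  max_direct_response_body_size_bytes: { value: 4096 }, // keep the 4KB default explicit
};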
+ * + * Use of this field is filter specific; + * see the :ref:`HTTP filter documentation ` for if and how it is utilized. + * [#comment: An entry's value may be wrapped in a + * :ref:`FilterConfig` + * message to specify additional options.] + */ + 'typed_per_filter_config'?: ({[key: string]: _google_protobuf_Any}); +} + +/** + * [#next-free-field: 17] + */ +export interface RouteConfiguration__Output { + /** + * The name of the route configuration. For example, it might match + * :ref:`route_config_name + * ` in + * :ref:`envoy_v3_api_msg_extensions.filters.network.http_connection_manager.v3.Rds`. + */ + 'name': (string); + /** + * An array of virtual hosts that make up the route table. + */ + 'virtual_hosts': (_envoy_config_route_v3_VirtualHost__Output)[]; + /** + * Optionally specifies a list of HTTP headers that the connection manager + * will consider to be internal only. If they are found on external requests they will be cleaned + * prior to filter invocation. See :ref:`config_http_conn_man_headers_x-envoy-internal` for more + * information. + */ + 'internal_only_headers': (string)[]; + /** + * Specifies a list of HTTP headers that should be added to each response that + * the connection manager encodes. Headers specified at this level are applied + * after headers from any enclosed :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` or + * :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. For more information, including details on + * header value syntax, see the documentation on :ref:`custom request headers + * `. + */ + 'response_headers_to_add': (_envoy_config_core_v3_HeaderValueOption__Output)[]; + /** + * Specifies a list of HTTP headers that should be removed from each response + * that the connection manager encodes. + */ + 'response_headers_to_remove': (string)[]; + /** + * Specifies a list of HTTP headers that should be added to each request + * routed by the HTTP connection manager. Headers specified at this level are + * applied after headers from any enclosed :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` or + * :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. For more information, including details on + * header value syntax, see the documentation on :ref:`custom request headers + * `. + */ + 'request_headers_to_add': (_envoy_config_core_v3_HeaderValueOption__Output)[]; + /** + * An optional boolean that specifies whether the clusters that the route + * table refers to will be validated by the cluster manager. If set to true + * and a route refers to a non-existent cluster, the route table will not + * load. If set to false and a route refers to a non-existent cluster, the + * route table will load and the router filter will return a 404 if the route + * is selected at runtime. This setting defaults to true if the route table + * is statically defined via the :ref:`route_config + * ` + * option. This setting default to false if the route table is loaded dynamically via the + * :ref:`rds + * ` + * option. Users may wish to override the default behavior in certain cases (for example when + * using CDS with a static route table). + */ + 'validate_clusters': (_google_protobuf_BoolValue__Output | null); + /** + * Specifies a list of HTTP headers that should be removed from each request + * routed by the HTTP connection manager. + */ + 'request_headers_to_remove': (string)[]; + /** + * An array of virtual hosts will be dynamically loaded via the VHDS API. + * Both ``virtual_hosts`` and ``vhds`` fields will be used when present. 
``virtual_hosts`` can be used + * for a base routing table or for infrequently changing virtual hosts. ``vhds`` is used for + * on-demand discovery of virtual hosts. The contents of these two fields will be merged to + * generate a routing table for a given RouteConfiguration, with ``vhds`` derived configuration + * taking precedence. + */ + 'vhds': (_envoy_config_route_v3_Vhds__Output | null); + /** + * By default, headers that should be added/removed are evaluated from most to least specific: + * + * * route level + * * virtual host level + * * connection manager level + * + * To allow setting overrides at the route or virtual host level, this order can be reversed + * by setting this option to true. Defaults to false. + */ + 'most_specific_header_mutations_wins': (boolean); + /** + * The maximum bytes of the response :ref:`direct response body + * ` size. If not specified the default + * is 4096. + * + * .. warning:: + * + * Envoy currently holds the content of :ref:`direct response body + * ` in memory. Be careful setting + * this to be larger than the default 4KB, since the allocated memory for direct response body + * is not subject to data plane buffering controls. + */ + 'max_direct_response_body_size_bytes': (_google_protobuf_UInt32Value__Output | null); + /** + * A list of plugins and their configurations which may be used by a + * :ref:`cluster specifier plugin name ` + * within the route. All ``extension.name`` fields in this list must be unique. + */ + 'cluster_specifier_plugins': (_envoy_config_route_v3_ClusterSpecifierPlugin__Output)[]; + /** + * Specify a set of default request mirroring policies which apply to all routes under its virtual hosts. + * Note that policies are not merged, the most specific non-empty one becomes the mirror policies. + */ + 'request_mirror_policies': (_envoy_config_route_v3_RouteAction_RequestMirrorPolicy__Output)[]; + /** + * By default, port in :authority header (if any) is used in host matching. + * With this option enabled, Envoy will ignore the port number in the :authority header (if any) when picking VirtualHost. + * NOTE: this option will not strip the port number (if any) contained in route config + * :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`.domains field. + */ + 'ignore_port_in_host_matching': (boolean); + /** + * Ignore path-parameters in path-matching. + * Before RFC3986, URI were like(RFC1808): :///;?# + * Envoy by default takes ":path" as ";". + * For users who want to only match path on the "" portion, this option should be true. + */ + 'ignore_path_parameters_in_path_matching': (boolean); + /** + * The typed_per_filter_config field can be used to provide RouteConfiguration level per filter config. + * The key should match the :ref:`filter config name + * `. + * The canonical filter name (e.g., ``envoy.filters.http.buffer`` for the HTTP buffer filter) can also + * be used for the backwards compatibility. If there is no entry referred by the filter config name, the + * entry referred by the canonical filter name will be provided to the filters as fallback. + * + * Use of this field is filter specific; + * see the :ref:`HTTP filter documentation ` for if and how it is utilized. + * [#comment: An entry's value may be wrapped in a + * :ref:`FilterConfig` + * message to specify additional options.] 
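For orientation, a value satisfying the plain ``RouteConfiguration`` interface (the input form, not ``__Output``) might look like the minimal sketch below; the route, cluster, and host names are invented, and the import path simply mirrors the generated tree.

import type { RouteConfiguration } from './RouteConfiguration';

// Illustrative sketch: one virtual host sending every path to a hypothetical "backend" cluster.
const exampleRouteConfig: RouteConfiguration = {
  name: 'example-route-config',
  virtual_hosts: [
    {
      name: 'example-vhost',
      domains: ['*'],
      routes: [
        { match: { prefix: '/' }, route: { cluster: 'backend' } },
      ],
    },
  ],
  validate_clusters: { value: false },   // google.protobuf.BoolValue wrapper
};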
+ */ + 'typed_per_filter_config': ({[key: string]: _google_protobuf_Any__Output}); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/route/v3/RouteList.ts b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/RouteList.ts new file mode 100644 index 000000000..676d38554 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/RouteList.ts @@ -0,0 +1,25 @@ +// Original file: deps/envoy-api/envoy/config/route/v3/route_components.proto + +import type { Route as _envoy_config_route_v3_Route, Route__Output as _envoy_config_route_v3_Route__Output } from '../../../../envoy/config/route/v3/Route'; + +/** + * This can be used in route matcher :ref:`VirtualHost.matcher `. + * When the matcher matches, routes will be matched and run. + */ +export interface RouteList { + /** + * The list of routes that will be matched and run, in order. The first route that matches will be used. + */ + 'routes'?: (_envoy_config_route_v3_Route)[]; +} + +/** + * This can be used in route matcher :ref:`VirtualHost.matcher `. + * When the matcher matches, routes will be matched and run. + */ +export interface RouteList__Output { + /** + * The list of routes that will be matched and run, in order. The first route that matches will be used. + */ + 'routes': (_envoy_config_route_v3_Route__Output)[]; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/route/v3/RouteMatch.ts b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/RouteMatch.ts new file mode 100644 index 000000000..06982a82f --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/RouteMatch.ts @@ -0,0 +1,309 @@ +// Original file: deps/envoy-api/envoy/config/route/v3/route_components.proto + +import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../../google/protobuf/BoolValue'; +import type { HeaderMatcher as _envoy_config_route_v3_HeaderMatcher, HeaderMatcher__Output as _envoy_config_route_v3_HeaderMatcher__Output } from '../../../../envoy/config/route/v3/HeaderMatcher'; +import type { QueryParameterMatcher as _envoy_config_route_v3_QueryParameterMatcher, QueryParameterMatcher__Output as _envoy_config_route_v3_QueryParameterMatcher__Output } from '../../../../envoy/config/route/v3/QueryParameterMatcher'; +import type { RuntimeFractionalPercent as _envoy_config_core_v3_RuntimeFractionalPercent, RuntimeFractionalPercent__Output as _envoy_config_core_v3_RuntimeFractionalPercent__Output } from '../../../../envoy/config/core/v3/RuntimeFractionalPercent'; +import type { RegexMatcher as _envoy_type_matcher_v3_RegexMatcher, RegexMatcher__Output as _envoy_type_matcher_v3_RegexMatcher__Output } from '../../../../envoy/type/matcher/v3/RegexMatcher'; +import type { MetadataMatcher as _envoy_type_matcher_v3_MetadataMatcher, MetadataMatcher__Output as _envoy_type_matcher_v3_MetadataMatcher__Output } from '../../../../envoy/type/matcher/v3/MetadataMatcher'; +import type { TypedExtensionConfig as _envoy_config_core_v3_TypedExtensionConfig, TypedExtensionConfig__Output as _envoy_config_core_v3_TypedExtensionConfig__Output } from '../../../../envoy/config/core/v3/TypedExtensionConfig'; + +/** + * An extensible message for matching CONNECT requests. + */ +export interface _envoy_config_route_v3_RouteMatch_ConnectMatcher { +} + +/** + * An extensible message for matching CONNECT requests. 
+ */ +export interface _envoy_config_route_v3_RouteMatch_ConnectMatcher__Output { +} + +export interface _envoy_config_route_v3_RouteMatch_GrpcRouteMatchOptions { +} + +export interface _envoy_config_route_v3_RouteMatch_GrpcRouteMatchOptions__Output { +} + +export interface _envoy_config_route_v3_RouteMatch_TlsContextMatchOptions { + /** + * If specified, the route will match against whether or not a certificate is presented. + * If not specified, certificate presentation status (true or false) will not be considered when route matching. + */ + 'presented'?: (_google_protobuf_BoolValue | null); + /** + * If specified, the route will match against whether or not a certificate is validated. + * If not specified, certificate validation status (true or false) will not be considered when route matching. + */ + 'validated'?: (_google_protobuf_BoolValue | null); +} + +export interface _envoy_config_route_v3_RouteMatch_TlsContextMatchOptions__Output { + /** + * If specified, the route will match against whether or not a certificate is presented. + * If not specified, certificate presentation status (true or false) will not be considered when route matching. + */ + 'presented': (_google_protobuf_BoolValue__Output | null); + /** + * If specified, the route will match against whether or not a certificate is validated. + * If not specified, certificate validation status (true or false) will not be considered when route matching. + */ + 'validated': (_google_protobuf_BoolValue__Output | null); +} + +/** + * [#next-free-field: 16] + */ +export interface RouteMatch { + /** + * If specified, the route is a prefix rule meaning that the prefix must + * match the beginning of the ``:path`` header. + */ + 'prefix'?: (string); + /** + * If specified, the route is an exact path rule meaning that the path must + * exactly match the ``:path`` header once the query string is removed. + */ + 'path'?: (string); + /** + * Indicates that prefix/path matching should be case sensitive. The default + * is true. Ignored for safe_regex matching. + */ + 'case_sensitive'?: (_google_protobuf_BoolValue | null); + /** + * Specifies a set of headers that the route should match on. The router will + * check the request’s headers against all the specified headers in the route + * config. A match will happen if all the headers in the route are present in + * the request with the same values (or based on presence if the value field + * is not in the config). + */ + 'headers'?: (_envoy_config_route_v3_HeaderMatcher)[]; + /** + * Specifies a set of URL query parameters on which the route should + * match. The router will check the query string from the ``path`` header + * against all the specified query parameters. If the number of specified + * query parameters is nonzero, they all must match the ``path`` header's + * query string for a match to occur. + * + * .. note:: + * + * If query parameters are used to pass request message fields when + * `grpc_json_transcoder `_ + * is used, the transcoded message fields maybe different. The query parameters are + * url encoded, but the message fields are not. For example, if a query + * parameter is "foo%20bar", the message field will be "foo bar". + */ + 'query_parameters'?: (_envoy_config_route_v3_QueryParameterMatcher)[]; + /** + * If specified, only gRPC requests will be matched. The router will check + * that the content-type header has a application/grpc or one of the various + * application/grpc+ values. 
+ */ + 'grpc'?: (_envoy_config_route_v3_RouteMatch_GrpcRouteMatchOptions | null); + /** + * Indicates that the route should additionally match on a runtime key. Every time the route + * is considered for a match, it must also fall under the percentage of matches indicated by + * this field. For some fraction N/D, a random number in the range [0,D) is selected. If the + * number is <= the value of the numerator N, or if the key is not present, the default + * value, the router continues to evaluate the remaining match criteria. A runtime_fraction + * route configuration can be used to roll out route changes in a gradual manner without full + * code/config deploys. Refer to the :ref:`traffic shifting + * ` docs for additional documentation. + * + * .. note:: + * + * Parsing this field is implemented such that the runtime key's data may be represented + * as a FractionalPercent proto represented as JSON/YAML and may also be represented as an + * integer with the assumption that the value is an integral percentage out of 100. For + * instance, a runtime key lookup returning the value "42" would parse as a FractionalPercent + * whose numerator is 42 and denominator is HUNDRED. This preserves legacy semantics. + */ + 'runtime_fraction'?: (_envoy_config_core_v3_RuntimeFractionalPercent | null); + /** + * If specified, the route is a regular expression rule meaning that the + * regex must match the ``:path`` header once the query string is removed. The entire path + * (without the query string) must match the regex. The rule will not match if only a + * subsequence of the ``:path`` header matches the regex. + * + * [#next-major-version: In the v3 API we should redo how path specification works such + * that we utilize StringMatcher, and additionally have consistent options around whether we + * strip query strings, do a case sensitive match, etc. In the interim it will be too disruptive + * to deprecate the existing options. We should even consider whether we want to do away with + * path_specifier entirely and just rely on a set of header matchers which can already match + * on :path, etc. The issue with that is it is unclear how to generically deal with query string + * stripping. This needs more thought.] + */ + 'safe_regex'?: (_envoy_type_matcher_v3_RegexMatcher | null); + /** + * If specified, the client tls context will be matched against the defined + * match options. + * + * [#next-major-version: unify with RBAC] + */ + 'tls_context'?: (_envoy_config_route_v3_RouteMatch_TlsContextMatchOptions | null); + /** + * If this is used as the matcher, the matcher will only match CONNECT requests. + * Note that this will not match HTTP/2 upgrade-style CONNECT requests + * (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style + * upgrades. + * This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2, + * where Extended CONNECT requests may have a path, the path matchers will work if + * there is a path present. + * Note that CONNECT support is currently considered alpha in Envoy. + * [#comment: TODO(htuch): Replace the above comment with an alpha tag.] + */ + 'connect_matcher'?: (_envoy_config_route_v3_RouteMatch_ConnectMatcher | null); + /** + * Specifies a set of dynamic metadata matchers on which the route should match. + * The router will check the dynamic metadata against all the specified dynamic metadata matchers. + * If the number of specified dynamic metadata matchers is nonzero, they all must match the + * dynamic metadata for a match to occur. 
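Putting the match criteria above together: exactly one member of the path oneof (``prefix``, ``path``, ``safe_regex``, and the later additions) should be set, and the remaining fields narrow the match further. A sketch, assuming the vendored API version includes ``HeaderMatcher.string_match`` (the header name and service prefix are invented):

import type { RouteMatch } from './RouteMatch';

// Illustrative sketch: prefix match on a gRPC service, gated by an exact header match.
const exampleMatch: RouteMatch = {
  prefix: '/inventory.v1.InventoryService/',   // one alternative of the path_specifier oneof
  case_sensitive: { value: true },             // google.protobuf.BoolValue wrapper
  headers: [
    { name: 'x-canary', string_match: { exact: 'true' } },
  ],
  grpc: {},                                    // GrpcRouteMatchOptions carries no fields
};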
+ */ + 'dynamic_metadata'?: (_envoy_type_matcher_v3_MetadataMatcher)[]; + /** + * If specified, the route is a path-separated prefix rule meaning that the + * ``:path`` header (without the query string) must either exactly match the + * ``path_separated_prefix`` or have it as a prefix, followed by ``/`` + * + * For example, ``/api/dev`` would match + * ``/api/dev``, ``/api/dev/``, ``/api/dev/v1``, and ``/api/dev?param=true`` + * but would not match ``/api/developer`` + * + * Expect the value to not contain ``?`` or ``#`` and not to end in ``/`` + */ + 'path_separated_prefix'?: (string); + /** + * [#extension-category: envoy.path.match] + */ + 'path_match_policy'?: (_envoy_config_core_v3_TypedExtensionConfig | null); + 'path_specifier'?: "prefix"|"path"|"safe_regex"|"connect_matcher"|"path_separated_prefix"|"path_match_policy"; +} + +/** + * [#next-free-field: 16] + */ +export interface RouteMatch__Output { + /** + * If specified, the route is a prefix rule meaning that the prefix must + * match the beginning of the ``:path`` header. + */ + 'prefix'?: (string); + /** + * If specified, the route is an exact path rule meaning that the path must + * exactly match the ``:path`` header once the query string is removed. + */ + 'path'?: (string); + /** + * Indicates that prefix/path matching should be case sensitive. The default + * is true. Ignored for safe_regex matching. + */ + 'case_sensitive': (_google_protobuf_BoolValue__Output | null); + /** + * Specifies a set of headers that the route should match on. The router will + * check the request’s headers against all the specified headers in the route + * config. A match will happen if all the headers in the route are present in + * the request with the same values (or based on presence if the value field + * is not in the config). + */ + 'headers': (_envoy_config_route_v3_HeaderMatcher__Output)[]; + /** + * Specifies a set of URL query parameters on which the route should + * match. The router will check the query string from the ``path`` header + * against all the specified query parameters. If the number of specified + * query parameters is nonzero, they all must match the ``path`` header's + * query string for a match to occur. + * + * .. note:: + * + * If query parameters are used to pass request message fields when + * `grpc_json_transcoder `_ + * is used, the transcoded message fields maybe different. The query parameters are + * url encoded, but the message fields are not. For example, if a query + * parameter is "foo%20bar", the message field will be "foo bar". + */ + 'query_parameters': (_envoy_config_route_v3_QueryParameterMatcher__Output)[]; + /** + * If specified, only gRPC requests will be matched. The router will check + * that the content-type header has a application/grpc or one of the various + * application/grpc+ values. + */ + 'grpc': (_envoy_config_route_v3_RouteMatch_GrpcRouteMatchOptions__Output | null); + /** + * Indicates that the route should additionally match on a runtime key. Every time the route + * is considered for a match, it must also fall under the percentage of matches indicated by + * this field. For some fraction N/D, a random number in the range [0,D) is selected. If the + * number is <= the value of the numerator N, or if the key is not present, the default + * value, the router continues to evaluate the remaining match criteria. A runtime_fraction + * route configuration can be used to roll out route changes in a gradual manner without full + * code/config deploys. 
Refer to the :ref:`traffic shifting + * ` docs for additional documentation. + * + * .. note:: + * + * Parsing this field is implemented such that the runtime key's data may be represented + * as a FractionalPercent proto represented as JSON/YAML and may also be represented as an + * integer with the assumption that the value is an integral percentage out of 100. For + * instance, a runtime key lookup returning the value "42" would parse as a FractionalPercent + * whose numerator is 42 and denominator is HUNDRED. This preserves legacy semantics. + */ + 'runtime_fraction': (_envoy_config_core_v3_RuntimeFractionalPercent__Output | null); + /** + * If specified, the route is a regular expression rule meaning that the + * regex must match the ``:path`` header once the query string is removed. The entire path + * (without the query string) must match the regex. The rule will not match if only a + * subsequence of the ``:path`` header matches the regex. + * + * [#next-major-version: In the v3 API we should redo how path specification works such + * that we utilize StringMatcher, and additionally have consistent options around whether we + * strip query strings, do a case sensitive match, etc. In the interim it will be too disruptive + * to deprecate the existing options. We should even consider whether we want to do away with + * path_specifier entirely and just rely on a set of header matchers which can already match + * on :path, etc. The issue with that is it is unclear how to generically deal with query string + * stripping. This needs more thought.] + */ + 'safe_regex'?: (_envoy_type_matcher_v3_RegexMatcher__Output | null); + /** + * If specified, the client tls context will be matched against the defined + * match options. + * + * [#next-major-version: unify with RBAC] + */ + 'tls_context': (_envoy_config_route_v3_RouteMatch_TlsContextMatchOptions__Output | null); + /** + * If this is used as the matcher, the matcher will only match CONNECT requests. + * Note that this will not match HTTP/2 upgrade-style CONNECT requests + * (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style + * upgrades. + * This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2, + * where Extended CONNECT requests may have a path, the path matchers will work if + * there is a path present. + * Note that CONNECT support is currently considered alpha in Envoy. + * [#comment: TODO(htuch): Replace the above comment with an alpha tag.] + */ + 'connect_matcher'?: (_envoy_config_route_v3_RouteMatch_ConnectMatcher__Output | null); + /** + * Specifies a set of dynamic metadata matchers on which the route should match. + * The router will check the dynamic metadata against all the specified dynamic metadata matchers. + * If the number of specified dynamic metadata matchers is nonzero, they all must match the + * dynamic metadata for a match to occur. 
+ */ + 'dynamic_metadata': (_envoy_type_matcher_v3_MetadataMatcher__Output)[]; + /** + * If specified, the route is a path-separated prefix rule meaning that the + * ``:path`` header (without the query string) must either exactly match the + * ``path_separated_prefix`` or have it as a prefix, followed by ``/`` + * + * For example, ``/api/dev`` would match + * ``/api/dev``, ``/api/dev/``, ``/api/dev/v1``, and ``/api/dev?param=true`` + * but would not match ``/api/developer`` + * + * Expect the value to not contain ``?`` or ``#`` and not to end in ``/`` + */ + 'path_separated_prefix'?: (string); + /** + * [#extension-category: envoy.path.match] + */ + 'path_match_policy'?: (_envoy_config_core_v3_TypedExtensionConfig__Output | null); + 'path_specifier': "prefix"|"path"|"safe_regex"|"connect_matcher"|"path_separated_prefix"|"path_match_policy"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/route/v3/ScopedRouteConfiguration.ts b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/ScopedRouteConfiguration.ts new file mode 100644 index 000000000..e13c537d4 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/ScopedRouteConfiguration.ts @@ -0,0 +1,231 @@ +// Original file: deps/envoy-api/envoy/config/route/v3/scoped_route.proto + +import type { RouteConfiguration as _envoy_config_route_v3_RouteConfiguration, RouteConfiguration__Output as _envoy_config_route_v3_RouteConfiguration__Output } from '../../../../envoy/config/route/v3/RouteConfiguration'; + +export interface _envoy_config_route_v3_ScopedRouteConfiguration_Key_Fragment { + /** + * A string to match against. + */ + 'string_key'?: (string); + 'type'?: "string_key"; +} + +export interface _envoy_config_route_v3_ScopedRouteConfiguration_Key_Fragment__Output { + /** + * A string to match against. + */ + 'string_key'?: (string); + 'type': "string_key"; +} + +/** + * Specifies a key which is matched against the output of the + * :ref:`scope_key_builder` + * specified in the HttpConnectionManager. The matching is done per HTTP + * request and is dependent on the order of the fragments contained in the + * Key. + */ +export interface _envoy_config_route_v3_ScopedRouteConfiguration_Key { + /** + * The ordered set of fragments to match against. The order must match the + * fragments in the corresponding + * :ref:`scope_key_builder`. + */ + 'fragments'?: (_envoy_config_route_v3_ScopedRouteConfiguration_Key_Fragment)[]; +} + +/** + * Specifies a key which is matched against the output of the + * :ref:`scope_key_builder` + * specified in the HttpConnectionManager. The matching is done per HTTP + * request and is dependent on the order of the fragments contained in the + * Key. + */ +export interface _envoy_config_route_v3_ScopedRouteConfiguration_Key__Output { + /** + * The ordered set of fragments to match against. The order must match the + * fragments in the corresponding + * :ref:`scope_key_builder`. + */ + 'fragments': (_envoy_config_route_v3_ScopedRouteConfiguration_Key_Fragment__Output)[]; +} + +/** + * Specifies a routing scope, which associates a + * :ref:`Key` to a + * :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. + * The :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` can be obtained dynamically + * via RDS (:ref:`route_configuration_name`) + * or specified inline (:ref:`route_configuration`). 
+ * + * The HTTP connection manager builds up a table consisting of these Key to + * RouteConfiguration mappings, and looks up the RouteConfiguration to use per + * request according to the algorithm specified in the + * :ref:`scope_key_builder` + * assigned to the HttpConnectionManager. + * + * For example, with the following configurations (in YAML): + * + * HttpConnectionManager config: + * + * .. code:: + * + * ... + * scoped_routes: + * name: foo-scoped-routes + * scope_key_builder: + * fragments: + * - header_value_extractor: + * name: X-Route-Selector + * element_separator: , + * element: + * separator: = + * key: vip + * + * ScopedRouteConfiguration resources (specified statically via + * :ref:`scoped_route_configurations_list` + * or obtained dynamically via SRDS): + * + * .. code:: + * + * (1) + * name: route-scope1 + * route_configuration_name: route-config1 + * key: + * fragments: + * - string_key: 172.10.10.20 + * + * (2) + * name: route-scope2 + * route_configuration_name: route-config2 + * key: + * fragments: + * - string_key: 172.20.20.30 + * + * A request from a client such as: + * + * .. code:: + * + * GET / HTTP/1.1 + * Host: foo.com + * X-Route-Selector: vip=172.10.10.20 + * + * would result in the routing table defined by the ``route-config1`` + * RouteConfiguration being assigned to the HTTP request/stream. + * + * [#next-free-field: 6] + */ +export interface ScopedRouteConfiguration { + /** + * The name assigned to the routing scope. + */ + 'name'?: (string); + /** + * The resource name to use for a :ref:`envoy_v3_api_msg_service.discovery.v3.DiscoveryRequest` to an + * RDS server to fetch the :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` associated + * with this scope. + */ + 'route_configuration_name'?: (string); + /** + * The key to match against. + */ + 'key'?: (_envoy_config_route_v3_ScopedRouteConfiguration_Key | null); + /** + * Whether the RouteConfiguration should be loaded on demand. + */ + 'on_demand'?: (boolean); + /** + * The :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` associated with the scope. + */ + 'route_configuration'?: (_envoy_config_route_v3_RouteConfiguration | null); +} + +/** + * Specifies a routing scope, which associates a + * :ref:`Key` to a + * :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. + * The :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` can be obtained dynamically + * via RDS (:ref:`route_configuration_name`) + * or specified inline (:ref:`route_configuration`). + * + * The HTTP connection manager builds up a table consisting of these Key to + * RouteConfiguration mappings, and looks up the RouteConfiguration to use per + * request according to the algorithm specified in the + * :ref:`scope_key_builder` + * assigned to the HttpConnectionManager. + * + * For example, with the following configurations (in YAML): + * + * HttpConnectionManager config: + * + * .. code:: + * + * ... + * scoped_routes: + * name: foo-scoped-routes + * scope_key_builder: + * fragments: + * - header_value_extractor: + * name: X-Route-Selector + * element_separator: , + * element: + * separator: = + * key: vip + * + * ScopedRouteConfiguration resources (specified statically via + * :ref:`scoped_route_configurations_list` + * or obtained dynamically via SRDS): + * + * .. 
code:: + * + * (1) + * name: route-scope1 + * route_configuration_name: route-config1 + * key: + * fragments: + * - string_key: 172.10.10.20 + * + * (2) + * name: route-scope2 + * route_configuration_name: route-config2 + * key: + * fragments: + * - string_key: 172.20.20.30 + * + * A request from a client such as: + * + * .. code:: + * + * GET / HTTP/1.1 + * Host: foo.com + * X-Route-Selector: vip=172.10.10.20 + * + * would result in the routing table defined by the ``route-config1`` + * RouteConfiguration being assigned to the HTTP request/stream. + * + * [#next-free-field: 6] + */ +export interface ScopedRouteConfiguration__Output { + /** + * The name assigned to the routing scope. + */ + 'name': (string); + /** + * The resource name to use for a :ref:`envoy_v3_api_msg_service.discovery.v3.DiscoveryRequest` to an + * RDS server to fetch the :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` associated + * with this scope. + */ + 'route_configuration_name': (string); + /** + * The key to match against. + */ + 'key': (_envoy_config_route_v3_ScopedRouteConfiguration_Key__Output | null); + /** + * Whether the RouteConfiguration should be loaded on demand. + */ + 'on_demand': (boolean); + /** + * The :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration` associated with the scope. + */ + 'route_configuration': (_envoy_config_route_v3_RouteConfiguration__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/Tracing.ts b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/Tracing.ts similarity index 72% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/route/Tracing.ts rename to packages/grpc-js-xds/src/generated/envoy/config/route/v3/Tracing.ts index 18b063339..29995243b 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/Tracing.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/Tracing.ts @@ -1,18 +1,18 @@ -// Original file: deps/envoy-api/envoy/api/v2/route/route_components.proto +// Original file: deps/envoy-api/envoy/config/route/v3/route_components.proto -import type { FractionalPercent as _envoy_type_FractionalPercent, FractionalPercent__Output as _envoy_type_FractionalPercent__Output } from '../../../../envoy/type/FractionalPercent'; -import type { CustomTag as _envoy_type_tracing_v2_CustomTag, CustomTag__Output as _envoy_type_tracing_v2_CustomTag__Output } from '../../../../envoy/type/tracing/v2/CustomTag'; +import type { FractionalPercent as _envoy_type_v3_FractionalPercent, FractionalPercent__Output as _envoy_type_v3_FractionalPercent__Output } from '../../../../envoy/type/v3/FractionalPercent'; +import type { CustomTag as _envoy_type_tracing_v3_CustomTag, CustomTag__Output as _envoy_type_tracing_v3_CustomTag__Output } from '../../../../envoy/type/tracing/v3/CustomTag'; export interface Tracing { /** * Target percentage of requests managed by this HTTP connection manager that will be force * traced if the :ref:`x-client-trace-id ` * header is set. This field is a direct analog for the runtime variable - * 'tracing.client_sampling' in the :ref:`HTTP Connection Manager + * 'tracing.client_enabled' in the :ref:`HTTP Connection Manager * `. * Default: 100% */ - 'client_sampling'?: (_envoy_type_FractionalPercent); + 'client_sampling'?: (_envoy_type_v3_FractionalPercent | null); /** * Target percentage of requests managed by this HTTP connection manager that will be randomly * selected for trace generation, if not requested by the client or not forced. 
This field is @@ -20,7 +20,7 @@ export interface Tracing { * :ref:`HTTP Connection Manager `. * Default: 100% */ - 'random_sampling'?: (_envoy_type_FractionalPercent); + 'random_sampling'?: (_envoy_type_v3_FractionalPercent | null); /** * Target percentage of requests managed by this HTTP connection manager that will be traced * after all other sampling checks have been applied (client-directed, force tracing, random @@ -31,16 +31,16 @@ export interface Tracing { * :ref:`HTTP Connection Manager `. * Default: 100% */ - 'overall_sampling'?: (_envoy_type_FractionalPercent); + 'overall_sampling'?: (_envoy_type_v3_FractionalPercent | null); /** * A list of custom tags with unique tag name to create tags for the active span. * It will take effect after merging with the :ref:`corresponding configuration - * ` + * ` * configured in the HTTP connection manager. If two tags with the same name are configured * each in the HTTP connection manager and the route level, the one configured here takes * priority. */ - 'custom_tags'?: (_envoy_type_tracing_v2_CustomTag)[]; + 'custom_tags'?: (_envoy_type_tracing_v3_CustomTag)[]; } export interface Tracing__Output { @@ -48,11 +48,11 @@ export interface Tracing__Output { * Target percentage of requests managed by this HTTP connection manager that will be force * traced if the :ref:`x-client-trace-id ` * header is set. This field is a direct analog for the runtime variable - * 'tracing.client_sampling' in the :ref:`HTTP Connection Manager + * 'tracing.client_enabled' in the :ref:`HTTP Connection Manager * `. * Default: 100% */ - 'client_sampling'?: (_envoy_type_FractionalPercent__Output); + 'client_sampling': (_envoy_type_v3_FractionalPercent__Output | null); /** * Target percentage of requests managed by this HTTP connection manager that will be randomly * selected for trace generation, if not requested by the client or not forced. This field is @@ -60,7 +60,7 @@ export interface Tracing__Output { * :ref:`HTTP Connection Manager `. * Default: 100% */ - 'random_sampling'?: (_envoy_type_FractionalPercent__Output); + 'random_sampling': (_envoy_type_v3_FractionalPercent__Output | null); /** * Target percentage of requests managed by this HTTP connection manager that will be traced * after all other sampling checks have been applied (client-directed, force tracing, random @@ -71,14 +71,14 @@ export interface Tracing__Output { * :ref:`HTTP Connection Manager `. * Default: 100% */ - 'overall_sampling'?: (_envoy_type_FractionalPercent__Output); + 'overall_sampling': (_envoy_type_v3_FractionalPercent__Output | null); /** * A list of custom tags with unique tag name to create tags for the active span. * It will take effect after merging with the :ref:`corresponding configuration - * ` + * ` * configured in the HTTP connection manager. If two tags with the same name are configured * each in the HTTP connection manager and the route level, the one configured here takes * priority. 
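As a usage sketch, a route-level tracing override that randomly samples 10% of requests could be written as below, assuming the generated v3 ``FractionalPercent`` follows the same string-literal enum convention as the other enums in this change; the import path is illustrative.

import type { Tracing } from './Tracing';

// Illustrative sketch: trace a random 10% of requests handled by this route.
const exampleTracing: Tracing = {
  random_sampling: {
    numerator: 10,
    denominator: 'HUNDRED',   // FractionalPercent.DenominatorType, string-literal form
  },
};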
*/ - 'custom_tags': (_envoy_type_tracing_v2_CustomTag__Output)[]; + 'custom_tags': (_envoy_type_tracing_v3_CustomTag__Output)[]; } diff --git a/packages/grpc-js-xds/src/generated/envoy/config/route/v3/Vhds.ts b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/Vhds.ts new file mode 100644 index 000000000..b8a37be65 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/Vhds.ts @@ -0,0 +1,17 @@ +// Original file: deps/envoy-api/envoy/config/route/v3/route.proto + +import type { ConfigSource as _envoy_config_core_v3_ConfigSource, ConfigSource__Output as _envoy_config_core_v3_ConfigSource__Output } from '../../../../envoy/config/core/v3/ConfigSource'; + +export interface Vhds { + /** + * Configuration source specifier for VHDS. + */ + 'config_source'?: (_envoy_config_core_v3_ConfigSource | null); +} + +export interface Vhds__Output { + /** + * Configuration source specifier for VHDS. + */ + 'config_source': (_envoy_config_core_v3_ConfigSource__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/VirtualCluster.ts b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/VirtualCluster.ts similarity index 54% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/route/VirtualCluster.ts rename to packages/grpc-js-xds/src/generated/envoy/config/route/v3/VirtualCluster.ts index f072710ce..3c65d6a34 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/VirtualCluster.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/VirtualCluster.ts @@ -1,7 +1,6 @@ -// Original file: deps/envoy-api/envoy/api/v2/route/route_components.proto +// Original file: deps/envoy-api/envoy/config/route/v3/route_components.proto -import type { RequestMethod as _envoy_api_v2_core_RequestMethod } from '../../../../envoy/api/v2/core/RequestMethod'; -import type { HeaderMatcher as _envoy_api_v2_route_HeaderMatcher, HeaderMatcher__Output as _envoy_api_v2_route_HeaderMatcher__Output } from '../../../../envoy/api/v2/route/HeaderMatcher'; +import type { HeaderMatcher as _envoy_config_route_v3_HeaderMatcher, HeaderMatcher__Output as _envoy_config_route_v3_HeaderMatcher__Output } from '../../../../envoy/config/route/v3/HeaderMatcher'; /** * A virtual cluster is a way of specifying a regex matching rule against @@ -23,42 +22,18 @@ import type { HeaderMatcher as _envoy_api_v2_route_HeaderMatcher, HeaderMatcher_ * statistics output are not free. */ export interface VirtualCluster { - /** - * Specifies a regex pattern to use for matching requests. The entire path of the request - * must match the regex. The regex grammar used is defined `here - * `_. - * - * Examples: - * - * * The regex ``/rides/\d+`` matches the path * /rides/0* - * * The regex ``/rides/\d+`` matches the path * /rides/123* - * * The regex ``/rides/\d+`` does not match the path * /rides/123/456* - * - * .. attention:: - * This field has been deprecated in favor of `headers` as it is not safe for use with - * untrusted input in all cases. - */ - 'pattern'?: (string); /** * Specifies the name of the virtual cluster. The virtual cluster name as well * as the virtual host name are used when emitting statistics. The statistics are emitted by the * router filter and are documented :ref:`here `. */ 'name'?: (string); - /** - * Optionally specifies the HTTP method to match on. For example GET, PUT, - * etc. - * - * .. attention:: - * This field has been deprecated in favor of `headers`. 
- */ - 'method'?: (_envoy_api_v2_core_RequestMethod | keyof typeof _envoy_api_v2_core_RequestMethod); /** * Specifies a list of header matchers to use for matching requests. Each specified header must - * match. The pseudo-headers `:path` and `:method` can be used to match the request path and + * match. The pseudo-headers ``:path`` and ``:method`` can be used to match the request path and * method, respectively. */ - 'headers'?: (_envoy_api_v2_route_HeaderMatcher)[]; + 'headers'?: (_envoy_config_route_v3_HeaderMatcher)[]; } /** @@ -81,40 +56,16 @@ export interface VirtualCluster { * statistics output are not free. */ export interface VirtualCluster__Output { - /** - * Specifies a regex pattern to use for matching requests. The entire path of the request - * must match the regex. The regex grammar used is defined `here - * `_. - * - * Examples: - * - * * The regex ``/rides/\d+`` matches the path * /rides/0* - * * The regex ``/rides/\d+`` matches the path * /rides/123* - * * The regex ``/rides/\d+`` does not match the path * /rides/123/456* - * - * .. attention:: - * This field has been deprecated in favor of `headers` as it is not safe for use with - * untrusted input in all cases. - */ - 'pattern': (string); /** * Specifies the name of the virtual cluster. The virtual cluster name as well * as the virtual host name are used when emitting statistics. The statistics are emitted by the * router filter and are documented :ref:`here `. */ 'name': (string); - /** - * Optionally specifies the HTTP method to match on. For example GET, PUT, - * etc. - * - * .. attention:: - * This field has been deprecated in favor of `headers`. - */ - 'method': (keyof typeof _envoy_api_v2_core_RequestMethod); /** * Specifies a list of header matchers to use for matching requests. Each specified header must - * match. The pseudo-headers `:path` and `:method` can be used to match the request path and + * match. The pseudo-headers ``:path`` and ``:method`` can be used to match the request path and * method, respectively. 
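With ``pattern`` and ``method`` removed, a v3 virtual cluster is expressed entirely through header matchers; the old method- and path-based matching can be recovered via the ``:method`` and ``:path`` pseudo-headers, as in this illustrative sketch (the names and paths are invented, and ``string_match`` is assumed to be available in the vendored API version).

import type { VirtualCluster } from './VirtualCluster';

// Illustrative sketch: count POSTs under /rides/ as the "ride_create" virtual cluster.
const exampleVirtualCluster: VirtualCluster = {
  name: 'ride_create',
  headers: [
    { name: ':method', string_match: { exact: 'POST' } },
    { name: ':path', string_match: { prefix: '/rides/' } },
  ],
};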
*/ - 'headers': (_envoy_api_v2_route_HeaderMatcher__Output)[]; + 'headers': (_envoy_config_route_v3_HeaderMatcher__Output)[]; } diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/VirtualHost.ts b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/VirtualHost.ts similarity index 53% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/route/VirtualHost.ts rename to packages/grpc-js-xds/src/generated/envoy/config/route/v3/VirtualHost.ts index ad806e949..5109be872 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/route/VirtualHost.ts +++ b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/VirtualHost.ts @@ -1,34 +1,56 @@ -// Original file: deps/envoy-api/envoy/api/v2/route/route_components.proto +// Original file: deps/envoy-api/envoy/config/route/v3/route_components.proto -import type { Route as _envoy_api_v2_route_Route, Route__Output as _envoy_api_v2_route_Route__Output } from '../../../../envoy/api/v2/route/Route'; -import type { VirtualCluster as _envoy_api_v2_route_VirtualCluster, VirtualCluster__Output as _envoy_api_v2_route_VirtualCluster__Output } from '../../../../envoy/api/v2/route/VirtualCluster'; -import type { RateLimit as _envoy_api_v2_route_RateLimit, RateLimit__Output as _envoy_api_v2_route_RateLimit__Output } from '../../../../envoy/api/v2/route/RateLimit'; -import type { HeaderValueOption as _envoy_api_v2_core_HeaderValueOption, HeaderValueOption__Output as _envoy_api_v2_core_HeaderValueOption__Output } from '../../../../envoy/api/v2/core/HeaderValueOption'; -import type { CorsPolicy as _envoy_api_v2_route_CorsPolicy, CorsPolicy__Output as _envoy_api_v2_route_CorsPolicy__Output } from '../../../../envoy/api/v2/route/CorsPolicy'; -import type { Struct as _google_protobuf_Struct, Struct__Output as _google_protobuf_Struct__Output } from '../../../../google/protobuf/Struct'; +import type { Route as _envoy_config_route_v3_Route, Route__Output as _envoy_config_route_v3_Route__Output } from '../../../../envoy/config/route/v3/Route'; +import type { VirtualCluster as _envoy_config_route_v3_VirtualCluster, VirtualCluster__Output as _envoy_config_route_v3_VirtualCluster__Output } from '../../../../envoy/config/route/v3/VirtualCluster'; +import type { RateLimit as _envoy_config_route_v3_RateLimit, RateLimit__Output as _envoy_config_route_v3_RateLimit__Output } from '../../../../envoy/config/route/v3/RateLimit'; +import type { HeaderValueOption as _envoy_config_core_v3_HeaderValueOption, HeaderValueOption__Output as _envoy_config_core_v3_HeaderValueOption__Output } from '../../../../envoy/config/core/v3/HeaderValueOption'; +import type { CorsPolicy as _envoy_config_route_v3_CorsPolicy, CorsPolicy__Output as _envoy_config_route_v3_CorsPolicy__Output } from '../../../../envoy/config/route/v3/CorsPolicy'; import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; -import type { RetryPolicy as _envoy_api_v2_route_RetryPolicy, RetryPolicy__Output as _envoy_api_v2_route_RetryPolicy__Output } from '../../../../envoy/api/v2/route/RetryPolicy'; -import type { HedgePolicy as _envoy_api_v2_route_HedgePolicy, HedgePolicy__Output as _envoy_api_v2_route_HedgePolicy__Output } from '../../../../envoy/api/v2/route/HedgePolicy'; +import type { RetryPolicy as _envoy_config_route_v3_RetryPolicy, RetryPolicy__Output as _envoy_config_route_v3_RetryPolicy__Output } from '../../../../envoy/config/route/v3/RetryPolicy'; +import type { HedgePolicy as _envoy_config_route_v3_HedgePolicy, 
HedgePolicy__Output as _envoy_config_route_v3_HedgePolicy__Output } from '../../../../envoy/config/route/v3/HedgePolicy'; import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; +import type { Matcher as _xds_type_matcher_v3_Matcher, Matcher__Output as _xds_type_matcher_v3_Matcher__Output } from '../../../../xds/type/matcher/v3/Matcher'; +import type { _envoy_config_route_v3_RouteAction_RequestMirrorPolicy, _envoy_config_route_v3_RouteAction_RequestMirrorPolicy__Output } from '../../../../envoy/config/route/v3/RouteAction'; -// Original file: deps/envoy-api/envoy/api/v2/route/route_components.proto +// Original file: deps/envoy-api/envoy/config/route/v3/route_components.proto -export enum _envoy_api_v2_route_VirtualHost_TlsRequirementType { +export const _envoy_config_route_v3_VirtualHost_TlsRequirementType = { /** * No TLS requirement for the virtual host. */ - NONE = 0, + NONE: 'NONE', /** * External requests must use TLS. If a request is external and it is not * using TLS, a 301 redirect will be sent telling the client to use HTTPS. */ - EXTERNAL_ONLY = 1, + EXTERNAL_ONLY: 'EXTERNAL_ONLY', /** * All requests must use TLS. If a request is not using TLS, a 301 redirect * will be sent telling the client to use HTTPS. */ - ALL = 2, -} + ALL: 'ALL', +} as const; + +export type _envoy_config_route_v3_VirtualHost_TlsRequirementType = + /** + * No TLS requirement for the virtual host. + */ + | 'NONE' + | 0 + /** + * External requests must use TLS. If a request is external and it is not + * using TLS, a 301 redirect will be sent telling the client to use HTTPS. + */ + | 'EXTERNAL_ONLY' + | 1 + /** + * All requests must use TLS. If a request is not using TLS, a 301 redirect + * will be sent telling the client to use HTTPS. + */ + | 'ALL' + | 2 + +export type _envoy_config_route_v3_VirtualHost_TlsRequirementType__Output = typeof _envoy_config_route_v3_VirtualHost_TlsRequirementType[keyof typeof _envoy_config_route_v3_VirtualHost_TlsRequirementType] /** * The top level element in the routing configuration is a virtual host. Each virtual host has @@ -36,7 +58,7 @@ export enum _envoy_api_v2_route_VirtualHost_TlsRequirementType { * host header. This allows a single listener to service multiple top level domain path trees. Once * a virtual host is selected based on the domain, the routes are processed in order to see which * upstream cluster to route to or whether to perform a redirect. - * [#next-free-field: 21] + * [#next-free-field: 24] */ export interface VirtualHost { /** @@ -68,58 +90,60 @@ export interface VirtualHost { /** * The list of routes that will be matched, in order, for incoming requests. * The first route that matches will be used. + * Only one of this and ``matcher`` can be specified. */ - 'routes'?: (_envoy_api_v2_route_Route)[]; + 'routes'?: (_envoy_config_route_v3_Route)[]; /** * Specifies the type of TLS enforcement the virtual host expects. If this option is not * specified, there is no TLS requirement for the virtual host. */ - 'require_tls'?: (_envoy_api_v2_route_VirtualHost_TlsRequirementType | keyof typeof _envoy_api_v2_route_VirtualHost_TlsRequirementType); + 'require_tls'?: (_envoy_config_route_v3_VirtualHost_TlsRequirementType); /** * A list of virtual clusters defined for this virtual host. Virtual clusters * are used for additional statistics gathering. 
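Because ``TlsRequirementType`` is now generated as a const object plus a string/number union rather than a TypeScript ``enum``, the input-side ``require_tls`` field accepts the string name, the numeric value, or a member of the exported const object, while the ``__Output`` variant is typed as the string name. A brief sketch (the host name and domain are invented; the import alias is only for brevity):

import type { VirtualHost } from './VirtualHost';
import { _envoy_config_route_v3_VirtualHost_TlsRequirementType as TlsRequirementType } from './VirtualHost';

// Illustrative sketch: the spellings noted below are interchangeable on the input type.
const secureHost: VirtualHost = {
  name: 'secure-vhost',
  domains: ['example.com'],
  require_tls: TlsRequirementType.EXTERNAL_ONLY,   // equivalently 'EXTERNAL_ONLY' or 1
};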
*/ - 'virtual_clusters'?: (_envoy_api_v2_route_VirtualCluster)[]; + 'virtual_clusters'?: (_envoy_config_route_v3_VirtualCluster)[]; /** * Specifies a set of rate limit configurations that will be applied to the * virtual host. */ - 'rate_limits'?: (_envoy_api_v2_route_RateLimit)[]; + 'rate_limits'?: (_envoy_config_route_v3_RateLimit)[]; /** * Specifies a list of HTTP headers that should be added to each request * handled by this virtual host. Headers specified at this level are applied - * after headers from enclosed :ref:`envoy_api_msg_route.Route` and before headers from the - * enclosing :ref:`envoy_api_msg_RouteConfiguration`. For more information, including + * after headers from enclosed :ref:`envoy_v3_api_msg_config.route.v3.Route` and before headers from the + * enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including * details on header value syntax, see the documentation on :ref:`custom request headers * `. */ - 'request_headers_to_add'?: (_envoy_api_v2_core_HeaderValueOption)[]; + 'request_headers_to_add'?: (_envoy_config_core_v3_HeaderValueOption)[]; /** - * Indicates that the virtual host has a CORS policy. + * Indicates that the virtual host has a CORS policy. This field is ignored if related cors policy is + * found in the + * :ref:`VirtualHost.typed_per_filter_config`. + * + * .. attention:: + * + * This option has been deprecated. Please use + * :ref:`VirtualHost.typed_per_filter_config` + * to configure the CORS HTTP filter. + * @deprecated */ - 'cors'?: (_envoy_api_v2_route_CorsPolicy); + 'cors'?: (_envoy_config_route_v3_CorsPolicy | null); /** * Specifies a list of HTTP headers that should be added to each response * handled by this virtual host. Headers specified at this level are applied - * after headers from enclosed :ref:`envoy_api_msg_route.Route` and before headers from the - * enclosing :ref:`envoy_api_msg_RouteConfiguration`. For more information, including + * after headers from enclosed :ref:`envoy_v3_api_msg_config.route.v3.Route` and before headers from the + * enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including * details on header value syntax, see the documentation on :ref:`custom request headers * `. */ - 'response_headers_to_add'?: (_envoy_api_v2_core_HeaderValueOption)[]; + 'response_headers_to_add'?: (_envoy_config_core_v3_HeaderValueOption)[]; /** * Specifies a list of HTTP headers that should be removed from each response * handled by this virtual host. */ 'response_headers_to_remove'?: (string)[]; - /** - * The per_filter_config field can be used to provide virtual host-specific - * configurations for filters. The key should match the filter name, such as - * *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - * specific; see the :ref:`HTTP filter documentation ` - * for if and how it is utilized. - */ - 'per_filter_config'?: ({[key: string]: _google_protobuf_Struct}); /** * Specifies a list of HTTP headers that should be removed from each request * handled by this virtual host. @@ -133,17 +157,24 @@ export interface VirtualHost { * will see the attempt count as perceived by the second Envoy. Defaults to false. * This header is unaffected by the * :ref:`suppress_envoy_headers - * ` flag. + * ` flag. * * [#next-major-version: rename to include_attempt_count_in_request.] 
*/ 'include_request_attempt_count'?: (boolean); /** - * The per_filter_config field can be used to provide virtual host-specific - * configurations for filters. The key should match the filter name, such as - * *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - * specific; see the :ref:`HTTP filter documentation ` - * for if and how it is utilized. + * The per_filter_config field can be used to provide virtual host-specific configurations for filters. + * The key should match the :ref:`filter config name + * `. + * The canonical filter name (e.g., ``envoy.filters.http.buffer`` for the HTTP buffer filter) can also + * be used for the backwards compatibility. If there is no entry referred by the filter config name, the + * entry referred by the canonical filter name will be provided to the filters as fallback. + * + * Use of this field is filter specific; + * see the :ref:`HTTP filter documentation ` for if and how it is utilized. + * [#comment: An entry's value may be wrapped in a + * :ref:`FilterConfig` + * message to specify additional options.] */ 'typed_per_filter_config'?: ({[key: string]: _google_protobuf_Any}); /** @@ -151,19 +182,19 @@ export interface VirtualHost { * route level entry will take precedence over this config and it'll be treated * independently (e.g.: values are not inherited). */ - 'retry_policy'?: (_envoy_api_v2_route_RetryPolicy); + 'retry_policy'?: (_envoy_config_route_v3_RetryPolicy | null); /** * Indicates the hedge policy for all routes in this virtual host. Note that setting a * route level entry will take precedence over this config and it'll be treated * independently (e.g.: values are not inherited). */ - 'hedge_policy'?: (_envoy_api_v2_route_HedgePolicy); + 'hedge_policy'?: (_envoy_config_route_v3_HedgePolicy | null); /** * The maximum bytes which will be buffered for retries and shadowing. * If set and a route-specific limit is not set, the bytes actually buffered will be the minimum * value of this and the listener per_connection_buffer_limit_bytes. */ - 'per_request_buffer_limit_bytes'?: (_google_protobuf_UInt32Value); + 'per_request_buffer_limit_bytes'?: (_google_protobuf_UInt32Value | null); /** * Decides whether the :ref:`x-envoy-attempt-count * ` header should be included @@ -172,17 +203,34 @@ export interface VirtualHost { * will see the attempt count as perceived by the Envoy closest upstream from itself. Defaults to false. * This header is unaffected by the * :ref:`suppress_envoy_headers - * ` flag. + * ` flag. */ 'include_attempt_count_in_response'?: (boolean); /** * [#not-implemented-hide:] * Specifies the configuration for retry policy extension. Note that setting a route level entry * will take precedence over this config and it'll be treated independently (e.g.: values are not - * inherited). :ref:`Retry policy ` should not be + * inherited). :ref:`Retry policy ` should not be * set if this field is used. */ - 'retry_policy_typed_config'?: (_google_protobuf_Any); + 'retry_policy_typed_config'?: (_google_protobuf_Any | null); + /** + * [#next-major-version: This should be included in a oneof with routes wrapped in a message.] + * The match tree to use when resolving route actions for incoming requests. Only one of this and ``routes`` + * can be specified. + */ + 'matcher'?: (_xds_type_matcher_v3_Matcher | null); + /** + * Specify a set of default request mirroring policies for every route under this virtual host. + * It takes precedence over the route config mirror policy entirely. 
+ * That is, policies are not merged, the most specific non-empty one becomes the mirror policies. + */ + 'request_mirror_policies'?: (_envoy_config_route_v3_RouteAction_RequestMirrorPolicy)[]; + /** + * Decides whether to include the :ref:`x-envoy-is-timeout-retry ` + * request header in retries initiated by per try timeouts. + */ + 'include_is_timeout_retry_header'?: (boolean); } /** @@ -191,7 +239,7 @@ export interface VirtualHost { * host header. This allows a single listener to service multiple top level domain path trees. Once * a virtual host is selected based on the domain, the routes are processed in order to see which * upstream cluster to route to or whether to perform a redirect. - * [#next-free-field: 21] + * [#next-free-field: 24] */ export interface VirtualHost__Output { /** @@ -223,58 +271,60 @@ export interface VirtualHost__Output { /** * The list of routes that will be matched, in order, for incoming requests. * The first route that matches will be used. + * Only one of this and ``matcher`` can be specified. */ - 'routes': (_envoy_api_v2_route_Route__Output)[]; + 'routes': (_envoy_config_route_v3_Route__Output)[]; /** * Specifies the type of TLS enforcement the virtual host expects. If this option is not * specified, there is no TLS requirement for the virtual host. */ - 'require_tls': (keyof typeof _envoy_api_v2_route_VirtualHost_TlsRequirementType); + 'require_tls': (_envoy_config_route_v3_VirtualHost_TlsRequirementType__Output); /** * A list of virtual clusters defined for this virtual host. Virtual clusters * are used for additional statistics gathering. */ - 'virtual_clusters': (_envoy_api_v2_route_VirtualCluster__Output)[]; + 'virtual_clusters': (_envoy_config_route_v3_VirtualCluster__Output)[]; /** * Specifies a set of rate limit configurations that will be applied to the * virtual host. */ - 'rate_limits': (_envoy_api_v2_route_RateLimit__Output)[]; + 'rate_limits': (_envoy_config_route_v3_RateLimit__Output)[]; /** * Specifies a list of HTTP headers that should be added to each request * handled by this virtual host. Headers specified at this level are applied - * after headers from enclosed :ref:`envoy_api_msg_route.Route` and before headers from the - * enclosing :ref:`envoy_api_msg_RouteConfiguration`. For more information, including + * after headers from enclosed :ref:`envoy_v3_api_msg_config.route.v3.Route` and before headers from the + * enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including * details on header value syntax, see the documentation on :ref:`custom request headers * `. */ - 'request_headers_to_add': (_envoy_api_v2_core_HeaderValueOption__Output)[]; + 'request_headers_to_add': (_envoy_config_core_v3_HeaderValueOption__Output)[]; /** - * Indicates that the virtual host has a CORS policy. + * Indicates that the virtual host has a CORS policy. This field is ignored if related cors policy is + * found in the + * :ref:`VirtualHost.typed_per_filter_config`. + * + * .. attention:: + * + * This option has been deprecated. Please use + * :ref:`VirtualHost.typed_per_filter_config` + * to configure the CORS HTTP filter. + * @deprecated */ - 'cors'?: (_envoy_api_v2_route_CorsPolicy__Output); + 'cors': (_envoy_config_route_v3_CorsPolicy__Output | null); /** * Specifies a list of HTTP headers that should be added to each response * handled by this virtual host. 
Headers specified at this level are applied - * after headers from enclosed :ref:`envoy_api_msg_route.Route` and before headers from the - * enclosing :ref:`envoy_api_msg_RouteConfiguration`. For more information, including + * after headers from enclosed :ref:`envoy_v3_api_msg_config.route.v3.Route` and before headers from the + * enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including * details on header value syntax, see the documentation on :ref:`custom request headers * `. */ - 'response_headers_to_add': (_envoy_api_v2_core_HeaderValueOption__Output)[]; + 'response_headers_to_add': (_envoy_config_core_v3_HeaderValueOption__Output)[]; /** * Specifies a list of HTTP headers that should be removed from each response * handled by this virtual host. */ 'response_headers_to_remove': (string)[]; - /** - * The per_filter_config field can be used to provide virtual host-specific - * configurations for filters. The key should match the filter name, such as - * *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - * specific; see the :ref:`HTTP filter documentation ` - * for if and how it is utilized. - */ - 'per_filter_config'?: ({[key: string]: _google_protobuf_Struct__Output}); /** * Specifies a list of HTTP headers that should be removed from each request * handled by this virtual host. @@ -288,37 +338,44 @@ export interface VirtualHost__Output { * will see the attempt count as perceived by the second Envoy. Defaults to false. * This header is unaffected by the * :ref:`suppress_envoy_headers - * ` flag. + * ` flag. * * [#next-major-version: rename to include_attempt_count_in_request.] */ 'include_request_attempt_count': (boolean); /** - * The per_filter_config field can be used to provide virtual host-specific - * configurations for filters. The key should match the filter name, such as - * *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter - * specific; see the :ref:`HTTP filter documentation ` - * for if and how it is utilized. + * The per_filter_config field can be used to provide virtual host-specific configurations for filters. + * The key should match the :ref:`filter config name + * `. + * The canonical filter name (e.g., ``envoy.filters.http.buffer`` for the HTTP buffer filter) can also + * be used for the backwards compatibility. If there is no entry referred by the filter config name, the + * entry referred by the canonical filter name will be provided to the filters as fallback. + * + * Use of this field is filter specific; + * see the :ref:`HTTP filter documentation ` for if and how it is utilized. + * [#comment: An entry's value may be wrapped in a + * :ref:`FilterConfig` + * message to specify additional options.] */ - 'typed_per_filter_config'?: ({[key: string]: _google_protobuf_Any__Output}); + 'typed_per_filter_config': ({[key: string]: _google_protobuf_Any__Output}); /** * Indicates the retry policy for all routes in this virtual host. Note that setting a * route level entry will take precedence over this config and it'll be treated * independently (e.g.: values are not inherited). */ - 'retry_policy'?: (_envoy_api_v2_route_RetryPolicy__Output); + 'retry_policy': (_envoy_config_route_v3_RetryPolicy__Output | null); /** * Indicates the hedge policy for all routes in this virtual host. Note that setting a * route level entry will take precedence over this config and it'll be treated * independently (e.g.: values are not inherited). 
*/ - 'hedge_policy'?: (_envoy_api_v2_route_HedgePolicy__Output); + 'hedge_policy': (_envoy_config_route_v3_HedgePolicy__Output | null); /** * The maximum bytes which will be buffered for retries and shadowing. * If set and a route-specific limit is not set, the bytes actually buffered will be the minimum * value of this and the listener per_connection_buffer_limit_bytes. */ - 'per_request_buffer_limit_bytes'?: (_google_protobuf_UInt32Value__Output); + 'per_request_buffer_limit_bytes': (_google_protobuf_UInt32Value__Output | null); /** * Decides whether the :ref:`x-envoy-attempt-count * ` header should be included @@ -327,15 +384,32 @@ export interface VirtualHost__Output { * will see the attempt count as perceived by the Envoy closest upstream from itself. Defaults to false. * This header is unaffected by the * :ref:`suppress_envoy_headers - * ` flag. + * ` flag. */ 'include_attempt_count_in_response': (boolean); /** * [#not-implemented-hide:] * Specifies the configuration for retry policy extension. Note that setting a route level entry * will take precedence over this config and it'll be treated independently (e.g.: values are not - * inherited). :ref:`Retry policy ` should not be + * inherited). :ref:`Retry policy ` should not be * set if this field is used. */ - 'retry_policy_typed_config'?: (_google_protobuf_Any__Output); + 'retry_policy_typed_config': (_google_protobuf_Any__Output | null); + /** + * [#next-major-version: This should be included in a oneof with routes wrapped in a message.] + * The match tree to use when resolving route actions for incoming requests. Only one of this and ``routes`` + * can be specified. + */ + 'matcher': (_xds_type_matcher_v3_Matcher__Output | null); + /** + * Specify a set of default request mirroring policies for every route under this virtual host. + * It takes precedence over the route config mirror policy entirely. + * That is, policies are not merged, the most specific non-empty one becomes the mirror policies. + */ + 'request_mirror_policies': (_envoy_config_route_v3_RouteAction_RequestMirrorPolicy__Output)[]; + /** + * Decides whether to include the :ref:`x-envoy-is-timeout-retry ` + * request header in retries initiated by per try timeouts. 
+ */ + 'include_is_timeout_retry_header': (boolean); } diff --git a/packages/grpc-js-xds/src/generated/envoy/config/route/v3/WeightedCluster.ts b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/WeightedCluster.ts new file mode 100644 index 000000000..91f4c6aeb --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/route/v3/WeightedCluster.ts @@ -0,0 +1,290 @@ +// Original file: deps/envoy-api/envoy/config/route/v3/route_components.proto + +import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; +import type { Metadata as _envoy_config_core_v3_Metadata, Metadata__Output as _envoy_config_core_v3_Metadata__Output } from '../../../../envoy/config/core/v3/Metadata'; +import type { HeaderValueOption as _envoy_config_core_v3_HeaderValueOption, HeaderValueOption__Output as _envoy_config_core_v3_HeaderValueOption__Output } from '../../../../envoy/config/core/v3/HeaderValueOption'; +import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; + +/** + * [#next-free-field: 13] + */ +export interface _envoy_config_route_v3_WeightedCluster_ClusterWeight { + /** + * Only one of ``name`` and ``cluster_header`` may be specified. + * [#next-major-version: Need to add back the validation rule: (validate.rules).string = {min_len: 1}] + * Name of the upstream cluster. The cluster must exist in the + * :ref:`cluster manager configuration `. + */ + 'name'?: (string); + /** + * Only one of ``name`` and ``cluster_header`` may be specified. + * [#next-major-version: Need to add back the validation rule: (validate.rules).string = {min_len: 1 }] + * Envoy will determine the cluster to route to by reading the value of the + * HTTP header named by cluster_header from the request headers. If the + * header is not found or the referenced cluster does not exist, Envoy will + * return a 404 response. + * + * .. attention:: + * + * Internally, Envoy always uses the HTTP/2 ``:authority`` header to represent the HTTP/1 + * ``Host`` header. Thus, if attempting to match on ``Host``, match on ``:authority`` instead. + * + * .. note:: + * + * If the header appears multiple times only the first value is used. + */ + 'cluster_header'?: (string); + /** + * The weight of the cluster. This value is relative to the other clusters' + * weights. When a request matches the route, the choice of an upstream cluster + * is determined by its weight. The sum of weights across all + * entries in the clusters array must be greater than 0, and must not exceed + * uint32_t maximal value (4294967295). + */ + 'weight'?: (_google_protobuf_UInt32Value | null); + /** + * Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in + * the upstream cluster with metadata matching what is set in this field will be considered for + * load balancing. Note that this will be merged with what's provided in + * :ref:`RouteAction.metadata_match `, with + * values here taking precedence. The filter name should be specified as ``envoy.lb``. + */ + 'metadata_match'?: (_envoy_config_core_v3_Metadata | null); + /** + * Specifies a list of headers to be added to requests when this cluster is selected + * through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. 
+ * Headers specified at this level are applied before headers from the enclosing + * :ref:`envoy_v3_api_msg_config.route.v3.Route`, :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`, and + * :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on + * header value syntax, see the documentation on :ref:`custom request headers + * `. + */ + 'request_headers_to_add'?: (_envoy_config_core_v3_HeaderValueOption)[]; + /** + * Specifies a list of HTTP headers that should be removed from each request when + * this cluster is selected through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. + */ + 'request_headers_to_remove'?: (string)[]; + /** + * Specifies a list of headers to be added to responses when this cluster is selected + * through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. + * Headers specified at this level are applied before headers from the enclosing + * :ref:`envoy_v3_api_msg_config.route.v3.Route`, :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`, and + * :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on + * header value syntax, see the documentation on :ref:`custom request headers + * `. + */ + 'response_headers_to_add'?: (_envoy_config_core_v3_HeaderValueOption)[]; + /** + * Specifies a list of headers to be removed from responses when this cluster is selected + * through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. + */ + 'response_headers_to_remove'?: (string)[]; + /** + * The per_filter_config field can be used to provide weighted cluster-specific configurations + * for filters. + * The key should match the :ref:`filter config name + * `. + * The canonical filter name (e.g., ``envoy.filters.http.buffer`` for the HTTP buffer filter) can also + * be used for the backwards compatibility. If there is no entry referred by the filter config name, the + * entry referred by the canonical filter name will be provided to the filters as fallback. + * + * Use of this field is filter specific; + * see the :ref:`HTTP filter documentation ` for if and how it is utilized. + * [#comment: An entry's value may be wrapped in a + * :ref:`FilterConfig` + * message to specify additional options.] + */ + 'typed_per_filter_config'?: ({[key: string]: _google_protobuf_Any}); + /** + * Indicates that during forwarding, the host header will be swapped with + * this value. + */ + 'host_rewrite_literal'?: (string); + 'host_rewrite_specifier'?: "host_rewrite_literal"; +} + +/** + * [#next-free-field: 13] + */ +export interface _envoy_config_route_v3_WeightedCluster_ClusterWeight__Output { + /** + * Only one of ``name`` and ``cluster_header`` may be specified. + * [#next-major-version: Need to add back the validation rule: (validate.rules).string = {min_len: 1}] + * Name of the upstream cluster. The cluster must exist in the + * :ref:`cluster manager configuration `. + */ + 'name': (string); + /** + * Only one of ``name`` and ``cluster_header`` may be specified. + * [#next-major-version: Need to add back the validation rule: (validate.rules).string = {min_len: 1 }] + * Envoy will determine the cluster to route to by reading the value of the + * HTTP header named by cluster_header from the request headers. If the + * header is not found or the referenced cluster does not exist, Envoy will + * return a 404 response. + * + * .. 
attention:: + * + * Internally, Envoy always uses the HTTP/2 ``:authority`` header to represent the HTTP/1 + * ``Host`` header. Thus, if attempting to match on ``Host``, match on ``:authority`` instead. + * + * .. note:: + * + * If the header appears multiple times only the first value is used. + */ + 'cluster_header': (string); + /** + * The weight of the cluster. This value is relative to the other clusters' + * weights. When a request matches the route, the choice of an upstream cluster + * is determined by its weight. The sum of weights across all + * entries in the clusters array must be greater than 0, and must not exceed + * uint32_t maximal value (4294967295). + */ + 'weight': (_google_protobuf_UInt32Value__Output | null); + /** + * Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in + * the upstream cluster with metadata matching what is set in this field will be considered for + * load balancing. Note that this will be merged with what's provided in + * :ref:`RouteAction.metadata_match `, with + * values here taking precedence. The filter name should be specified as ``envoy.lb``. + */ + 'metadata_match': (_envoy_config_core_v3_Metadata__Output | null); + /** + * Specifies a list of headers to be added to requests when this cluster is selected + * through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. + * Headers specified at this level are applied before headers from the enclosing + * :ref:`envoy_v3_api_msg_config.route.v3.Route`, :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`, and + * :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on + * header value syntax, see the documentation on :ref:`custom request headers + * `. + */ + 'request_headers_to_add': (_envoy_config_core_v3_HeaderValueOption__Output)[]; + /** + * Specifies a list of HTTP headers that should be removed from each request when + * this cluster is selected through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. + */ + 'request_headers_to_remove': (string)[]; + /** + * Specifies a list of headers to be added to responses when this cluster is selected + * through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. + * Headers specified at this level are applied before headers from the enclosing + * :ref:`envoy_v3_api_msg_config.route.v3.Route`, :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`, and + * :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on + * header value syntax, see the documentation on :ref:`custom request headers + * `. + */ + 'response_headers_to_add': (_envoy_config_core_v3_HeaderValueOption__Output)[]; + /** + * Specifies a list of headers to be removed from responses when this cluster is selected + * through the enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. + */ + 'response_headers_to_remove': (string)[]; + /** + * The per_filter_config field can be used to provide weighted cluster-specific configurations + * for filters. + * The key should match the :ref:`filter config name + * `. + * The canonical filter name (e.g., ``envoy.filters.http.buffer`` for the HTTP buffer filter) can also + * be used for the backwards compatibility. If there is no entry referred by the filter config name, the + * entry referred by the canonical filter name will be provided to the filters as fallback. 
+ * + * Use of this field is filter specific; + * see the :ref:`HTTP filter documentation ` for if and how it is utilized. + * [#comment: An entry's value may be wrapped in a + * :ref:`FilterConfig` + * message to specify additional options.] + */ + 'typed_per_filter_config': ({[key: string]: _google_protobuf_Any__Output}); + /** + * Indicates that during forwarding, the host header will be swapped with + * this value. + */ + 'host_rewrite_literal'?: (string); + 'host_rewrite_specifier': "host_rewrite_literal"; +} + +/** + * Compared to the :ref:`cluster ` field that specifies a + * single upstream cluster as the target of a request, the :ref:`weighted_clusters + * ` option allows for specification of + * multiple upstream clusters along with weights that indicate the percentage of + * traffic to be forwarded to each cluster. The router selects an upstream cluster based on the + * weights. + */ +export interface WeightedCluster { + /** + * Specifies one or more upstream clusters associated with the route. + */ + 'clusters'?: (_envoy_config_route_v3_WeightedCluster_ClusterWeight)[]; + /** + * Specifies the runtime key prefix that should be used to construct the + * runtime keys associated with each cluster. When the ``runtime_key_prefix`` is + * specified, the router will look for weights associated with each upstream + * cluster under the key ``runtime_key_prefix`` + ``.`` + ``cluster[i].name`` where + * ``cluster[i]`` denotes an entry in the clusters array field. If the runtime + * key for the cluster does not exist, the value specified in the + * configuration file will be used as the default weight. See the :ref:`runtime documentation + * ` for how key names map to the underlying implementation. + */ + 'runtime_key_prefix'?: (string); + /** + * Specifies the total weight across all clusters. The sum of all cluster weights must equal this + * value, if this is greater than 0. + * This field is now deprecated, and the client will use the sum of all + * cluster weights. It is up to the management server to supply the correct weights. + * @deprecated + */ + 'total_weight'?: (_google_protobuf_UInt32Value | null); + /** + * Specifies the header name that is used to look up the random value passed in the request header. + * This is used to ensure consistent cluster picking across multiple proxy levels for weighted traffic. + * If header is not present or invalid, Envoy will fall back to use the internally generated random value. + * This header is expected to be single-valued header as we only want to have one selected value throughout + * the process for the consistency. And the value is a unsigned number between 0 and UINT64_MAX. + */ + 'header_name'?: (string); + 'random_value_specifier'?: "header_name"; +} + +/** + * Compared to the :ref:`cluster ` field that specifies a + * single upstream cluster as the target of a request, the :ref:`weighted_clusters + * ` option allows for specification of + * multiple upstream clusters along with weights that indicate the percentage of + * traffic to be forwarded to each cluster. The router selects an upstream cluster based on the + * weights. + */ +export interface WeightedCluster__Output { + /** + * Specifies one or more upstream clusters associated with the route. + */ + 'clusters': (_envoy_config_route_v3_WeightedCluster_ClusterWeight__Output)[]; + /** + * Specifies the runtime key prefix that should be used to construct the + * runtime keys associated with each cluster. 
When the ``runtime_key_prefix`` is + * specified, the router will look for weights associated with each upstream + * cluster under the key ``runtime_key_prefix`` + ``.`` + ``cluster[i].name`` where + * ``cluster[i]`` denotes an entry in the clusters array field. If the runtime + * key for the cluster does not exist, the value specified in the + * configuration file will be used as the default weight. See the :ref:`runtime documentation + * ` for how key names map to the underlying implementation. + */ + 'runtime_key_prefix': (string); + /** + * Specifies the total weight across all clusters. The sum of all cluster weights must equal this + * value, if this is greater than 0. + * This field is now deprecated, and the client will use the sum of all + * cluster weights. It is up to the management server to supply the correct weights. + * @deprecated + */ + 'total_weight': (_google_protobuf_UInt32Value__Output | null); + /** + * Specifies the header name that is used to look up the random value passed in the request header. + * This is used to ensure consistent cluster picking across multiple proxy levels for weighted traffic. + * If header is not present or invalid, Envoy will fall back to use the internally generated random value. + * This header is expected to be single-valued header as we only want to have one selected value throughout + * the process for the consistency. And the value is a unsigned number between 0 and UINT64_MAX. + */ + 'header_name'?: (string); + 'random_value_specifier': "header_name"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/trace/v2/Tracing.ts b/packages/grpc-js-xds/src/generated/envoy/config/trace/v2/Tracing.ts deleted file mode 100644 index 629e3f1cc..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/config/trace/v2/Tracing.ts +++ /dev/null @@ -1,114 +0,0 @@ -// Original file: deps/envoy-api/envoy/config/trace/v2/http_tracer.proto - -import type { Struct as _google_protobuf_Struct, Struct__Output as _google_protobuf_Struct__Output } from '../../../../google/protobuf/Struct'; -import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; - -/** - * Configuration for an HTTP tracer provider used by Envoy. - * - * The configuration is defined by the - * :ref:`HttpConnectionManager.Tracing ` - * :ref:`provider ` - * field. - */ -export interface _envoy_config_trace_v2_Tracing_Http { - /** - * The name of the HTTP trace driver to instantiate. The name must match a - * supported HTTP trace driver. Built-in trace drivers: - * - * - *envoy.tracers.lightstep* - * - *envoy.tracers.zipkin* - * - *envoy.tracers.dynamic_ot* - * - *envoy.tracers.datadog* - * - *envoy.tracers.opencensus* - * - *envoy.tracers.xray* - */ - 'name'?: (string); - 'config'?: (_google_protobuf_Struct); - 'typed_config'?: (_google_protobuf_Any); - /** - * Trace driver specific configuration which depends on the driver being instantiated. - * See the trace drivers for examples: - * - * - :ref:`LightstepConfig ` - * - :ref:`ZipkinConfig ` - * - :ref:`DynamicOtConfig ` - * - :ref:`DatadogConfig ` - * - :ref:`OpenCensusConfig ` - * - :ref:`AWS X-Ray ` - */ - 'config_type'?: "config"|"typed_config"; -} - -/** - * Configuration for an HTTP tracer provider used by Envoy. - * - * The configuration is defined by the - * :ref:`HttpConnectionManager.Tracing ` - * :ref:`provider ` - * field. - */ -export interface _envoy_config_trace_v2_Tracing_Http__Output { - /** - * The name of the HTTP trace driver to instantiate. 
The name must match a - * supported HTTP trace driver. Built-in trace drivers: - * - * - *envoy.tracers.lightstep* - * - *envoy.tracers.zipkin* - * - *envoy.tracers.dynamic_ot* - * - *envoy.tracers.datadog* - * - *envoy.tracers.opencensus* - * - *envoy.tracers.xray* - */ - 'name': (string); - 'config'?: (_google_protobuf_Struct__Output); - 'typed_config'?: (_google_protobuf_Any__Output); - /** - * Trace driver specific configuration which depends on the driver being instantiated. - * See the trace drivers for examples: - * - * - :ref:`LightstepConfig ` - * - :ref:`ZipkinConfig ` - * - :ref:`DynamicOtConfig ` - * - :ref:`DatadogConfig ` - * - :ref:`OpenCensusConfig ` - * - :ref:`AWS X-Ray ` - */ - 'config_type': "config"|"typed_config"; -} - -/** - * The tracing configuration specifies settings for an HTTP tracer provider used by Envoy. - * - * Envoy may support other tracers in the future, but right now the HTTP tracer is the only one - * supported. - * - * .. attention:: - * - * Use of this message type has been deprecated in favor of direct use of - * :ref:`Tracing.Http `. - */ -export interface Tracing { - /** - * Provides configuration for the HTTP tracer. - */ - 'http'?: (_envoy_config_trace_v2_Tracing_Http); -} - -/** - * The tracing configuration specifies settings for an HTTP tracer provider used by Envoy. - * - * Envoy may support other tracers in the future, but right now the HTTP tracer is the only one - * supported. - * - * .. attention:: - * - * Use of this message type has been deprecated in favor of direct use of - * :ref:`Tracing.Http `. - */ -export interface Tracing__Output { - /** - * Provides configuration for the HTTP tracer. - */ - 'http'?: (_envoy_config_trace_v2_Tracing_Http__Output); -} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/trace/v3/Tracing.ts b/packages/grpc-js-xds/src/generated/envoy/config/trace/v3/Tracing.ts new file mode 100644 index 000000000..9b9859bc5 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/config/trace/v3/Tracing.ts @@ -0,0 +1,85 @@ +// Original file: deps/envoy-api/envoy/config/trace/v3/http_tracer.proto + +import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; + +/** + * Configuration for an HTTP tracer provider used by Envoy. + * + * The configuration is defined by the + * :ref:`HttpConnectionManager.Tracing ` + * :ref:`provider ` + * field. + */ +export interface _envoy_config_trace_v3_Tracing_Http { + /** + * The name of the HTTP trace driver to instantiate. The name must match a + * supported HTTP trace driver. + * See the :ref:`extensions listed in typed_config below ` for the default list of the HTTP trace driver. + */ + 'name'?: (string); + 'typed_config'?: (_google_protobuf_Any | null); + /** + * Trace driver specific configuration which must be set according to the driver being instantiated. + * [#extension-category: envoy.tracers] + */ + 'config_type'?: "typed_config"; +} + +/** + * Configuration for an HTTP tracer provider used by Envoy. + * + * The configuration is defined by the + * :ref:`HttpConnectionManager.Tracing ` + * :ref:`provider ` + * field. + */ +export interface _envoy_config_trace_v3_Tracing_Http__Output { + /** + * The name of the HTTP trace driver to instantiate. The name must match a + * supported HTTP trace driver. + * See the :ref:`extensions listed in typed_config below ` for the default list of the HTTP trace driver. 
+ */ + 'name': (string); + 'typed_config'?: (_google_protobuf_Any__Output | null); + /** + * Trace driver specific configuration which must be set according to the driver being instantiated. + * [#extension-category: envoy.tracers] + */ + 'config_type': "typed_config"; +} + +/** + * The tracing configuration specifies settings for an HTTP tracer provider used by Envoy. + * + * Envoy may support other tracers in the future, but right now the HTTP tracer is the only one + * supported. + * + * .. attention:: + * + * Use of this message type has been deprecated in favor of direct use of + * :ref:`Tracing.Http `. + */ +export interface Tracing { + /** + * Provides configuration for the HTTP tracer. + */ + 'http'?: (_envoy_config_trace_v3_Tracing_Http | null); +} + +/** + * The tracing configuration specifies settings for an HTTP tracer provider used by Envoy. + * + * Envoy may support other tracers in the future, but right now the HTTP tracer is the only one + * supported. + * + * .. attention:: + * + * Use of this message type has been deprecated in favor of direct use of + * :ref:`Tracing.Http `. + */ +export interface Tracing__Output { + /** + * Provides configuration for the HTTP tracer. + */ + 'http': (_envoy_config_trace_v3_Tracing_Http__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/data/accesslog/v3/AccessLogCommon.ts b/packages/grpc-js-xds/src/generated/envoy/data/accesslog/v3/AccessLogCommon.ts new file mode 100644 index 000000000..7ca3c5b19 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/data/accesslog/v3/AccessLogCommon.ts @@ -0,0 +1,423 @@ +// Original file: deps/envoy-api/envoy/data/accesslog/v3/accesslog.proto + +import type { Address as _envoy_config_core_v3_Address, Address__Output as _envoy_config_core_v3_Address__Output } from '../../../../envoy/config/core/v3/Address'; +import type { TLSProperties as _envoy_data_accesslog_v3_TLSProperties, TLSProperties__Output as _envoy_data_accesslog_v3_TLSProperties__Output } from '../../../../envoy/data/accesslog/v3/TLSProperties'; +import type { Timestamp as _google_protobuf_Timestamp, Timestamp__Output as _google_protobuf_Timestamp__Output } from '../../../../google/protobuf/Timestamp'; +import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../../google/protobuf/Duration'; +import type { ResponseFlags as _envoy_data_accesslog_v3_ResponseFlags, ResponseFlags__Output as _envoy_data_accesslog_v3_ResponseFlags__Output } from '../../../../envoy/data/accesslog/v3/ResponseFlags'; +import type { Metadata as _envoy_config_core_v3_Metadata, Metadata__Output as _envoy_config_core_v3_Metadata__Output } from '../../../../envoy/config/core/v3/Metadata'; +import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; +import type { AccessLogType as _envoy_data_accesslog_v3_AccessLogType, AccessLogType__Output as _envoy_data_accesslog_v3_AccessLogType__Output } from '../../../../envoy/data/accesslog/v3/AccessLogType'; +import type { Long } from '@grpc/proto-loader'; + +/** + * Defines fields that are shared by all Envoy access logs. + * [#next-free-field: 34] + */ +export interface AccessLogCommon { + /** + * [#not-implemented-hide:] + * This field indicates the rate at which this log entry was sampled. + * Valid range is (0.0, 1.0]. + */ + 'sample_rate'?: (number | string); + /** + * This field is the remote/origin address on which the request from the user was received. 
+ * Note: This may not be the physical peer. E.g, if the remote address is inferred from for + * example the x-forwarder-for header, proxy protocol, etc. + */ + 'downstream_remote_address'?: (_envoy_config_core_v3_Address | null); + /** + * This field is the local/destination address on which the request from the user was received. + */ + 'downstream_local_address'?: (_envoy_config_core_v3_Address | null); + /** + * If the connection is secure,S this field will contain TLS properties. + */ + 'tls_properties'?: (_envoy_data_accesslog_v3_TLSProperties | null); + /** + * The time that Envoy started servicing this request. This is effectively the time that the first + * downstream byte is received. + */ + 'start_time'?: (_google_protobuf_Timestamp | null); + /** + * Interval between the first downstream byte received and the last + * downstream byte received (i.e. time it takes to receive a request). + */ + 'time_to_last_rx_byte'?: (_google_protobuf_Duration | null); + /** + * Interval between the first downstream byte received and the first upstream byte sent. There may + * by considerable delta between ``time_to_last_rx_byte`` and this value due to filters. + * Additionally, the same caveats apply as documented in ``time_to_last_downstream_tx_byte`` about + * not accounting for kernel socket buffer time, etc. + */ + 'time_to_first_upstream_tx_byte'?: (_google_protobuf_Duration | null); + /** + * Interval between the first downstream byte received and the last upstream byte sent. There may + * by considerable delta between ``time_to_last_rx_byte`` and this value due to filters. + * Additionally, the same caveats apply as documented in ``time_to_last_downstream_tx_byte`` about + * not accounting for kernel socket buffer time, etc. + */ + 'time_to_last_upstream_tx_byte'?: (_google_protobuf_Duration | null); + /** + * Interval between the first downstream byte received and the first upstream + * byte received (i.e. time it takes to start receiving a response). + */ + 'time_to_first_upstream_rx_byte'?: (_google_protobuf_Duration | null); + /** + * Interval between the first downstream byte received and the last upstream + * byte received (i.e. time it takes to receive a complete response). + */ + 'time_to_last_upstream_rx_byte'?: (_google_protobuf_Duration | null); + /** + * Interval between the first downstream byte received and the first downstream byte sent. + * There may be a considerable delta between the ``time_to_first_upstream_rx_byte`` and this field + * due to filters. Additionally, the same caveats apply as documented in + * ``time_to_last_downstream_tx_byte`` about not accounting for kernel socket buffer time, etc. + */ + 'time_to_first_downstream_tx_byte'?: (_google_protobuf_Duration | null); + /** + * Interval between the first downstream byte received and the last downstream byte sent. + * Depending on protocol, buffering, windowing, filters, etc. there may be a considerable delta + * between ``time_to_last_upstream_rx_byte`` and this field. Note also that this is an approximate + * time. In the current implementation it does not include kernel socket buffer time. In the + * current implementation it also does not include send window buffering inside the HTTP/2 codec. + * In the future it is likely that work will be done to make this duration more accurate. + */ + 'time_to_last_downstream_tx_byte'?: (_google_protobuf_Duration | null); + /** + * The upstream remote/destination address that handles this exchange. This does not include + * retries. 
+ */ + 'upstream_remote_address'?: (_envoy_config_core_v3_Address | null); + /** + * The upstream local/origin address that handles this exchange. This does not include retries. + */ + 'upstream_local_address'?: (_envoy_config_core_v3_Address | null); + /** + * The upstream cluster that ``upstream_remote_address`` belongs to. + */ + 'upstream_cluster'?: (string); + /** + * Flags indicating occurrences during request/response processing. + */ + 'response_flags'?: (_envoy_data_accesslog_v3_ResponseFlags | null); + /** + * All metadata encountered during request processing, including endpoint + * selection. + * + * This can be used to associate IDs attached to the various configurations + * used to process this request with the access log entry. For example, a + * route created from a higher level forwarding rule with some ID can place + * that ID in this field and cross reference later. It can also be used to + * determine if a canary endpoint was used or not. + */ + 'metadata'?: (_envoy_config_core_v3_Metadata | null); + /** + * If upstream connection failed due to transport socket (e.g. TLS handshake), provides the + * failure reason from the transport socket. The format of this field depends on the configured + * upstream transport socket. Common TLS failures are in + * :ref:`TLS trouble shooting `. + */ + 'upstream_transport_failure_reason'?: (string); + /** + * The name of the route + */ + 'route_name'?: (string); + /** + * This field is the downstream direct remote address on which the request from the user was + * received. Note: This is always the physical peer, even if the remote address is inferred from + * for example the x-forwarder-for header, proxy protocol, etc. + */ + 'downstream_direct_remote_address'?: (_envoy_config_core_v3_Address | null); + /** + * Map of filter state in stream info that have been configured to be logged. If the filter + * state serialized to any message other than ``google.protobuf.Any`` it will be packed into + * ``google.protobuf.Any``. + */ + 'filter_state_objects'?: ({[key: string]: _google_protobuf_Any}); + /** + * A list of custom tags, which annotate logs with additional information. + * To configure this value, users should configure + * :ref:`custom_tags `. + */ + 'custom_tags'?: ({[key: string]: string}); + /** + * For HTTP: Total duration in milliseconds of the request from the start time to the last byte out. + * For TCP: Total duration in milliseconds of the downstream connection. + * This is the total duration of the request (i.e., when the request's ActiveStream is destroyed) + * and may be longer than ``time_to_last_downstream_tx_byte``. + */ + 'duration'?: (_google_protobuf_Duration | null); + /** + * For HTTP: Number of times the request is attempted upstream. Note that the field is omitted when the request was never attempted upstream. + * For TCP: Number of times the connection request is attempted upstream. Note that the field is omitted when the connect request was never attempted upstream. + */ + 'upstream_request_attempt_count'?: (number); + /** + * Connection termination details may provide additional information about why the connection was terminated by Envoy for L4 reasons. + */ + 'connection_termination_details'?: (string); + /** + * Optional unique id of stream (TCP connection, long-live HTTP2 stream, HTTP request) for logging and tracing. + * This could be any format string that could be used to identify one stream. 
+ */ + 'stream_id'?: (string); + /** + * If this log entry is final log entry that flushed after the stream completed or + * intermediate log entry that flushed periodically during the stream. + * There may be multiple intermediate log entries and only one final log entry for each + * long-live stream (TCP connection, long-live HTTP2 stream). + * And if it is necessary, unique ID or identifier can be added to the log entry + * :ref:`stream_id ` to + * correlate all these intermediate log entries and final log entry. + * + * .. attention:: + * + * This field is deprecated in favor of ``access_log_type`` for better indication of the + * type of the access log record. + * @deprecated + */ + 'intermediate_log_entry'?: (boolean); + /** + * If downstream connection in listener failed due to transport socket (e.g. TLS handshake), provides the + * failure reason from the transport socket. The format of this field depends on the configured downstream + * transport socket. Common TLS failures are in :ref:`TLS trouble shooting `. + */ + 'downstream_transport_failure_reason'?: (string); + /** + * For HTTP: Total number of bytes sent to the downstream by the http stream. + * For TCP: Total number of bytes sent to the downstream by the tcp proxy. + */ + 'downstream_wire_bytes_sent'?: (number | string | Long); + /** + * For HTTP: Total number of bytes received from the downstream by the http stream. Envoy over counts sizes of received HTTP/1.1 pipelined requests by adding up bytes of requests in the pipeline to the one currently being processed. + * For TCP: Total number of bytes received from the downstream by the tcp proxy. + */ + 'downstream_wire_bytes_received'?: (number | string | Long); + /** + * For HTTP: Total number of bytes sent to the upstream by the http stream. This value accumulates during upstream retries. + * For TCP: Total number of bytes sent to the upstream by the tcp proxy. + */ + 'upstream_wire_bytes_sent'?: (number | string | Long); + /** + * For HTTP: Total number of bytes received from the upstream by the http stream. + * For TCP: Total number of bytes sent to the upstream by the tcp proxy. + */ + 'upstream_wire_bytes_received'?: (number | string | Long); + /** + * The type of the access log, which indicates when the log was recorded. + * See :ref:`ACCESS_LOG_TYPE ` for the available values. + * In case the access log was recorded by a flow which does not correspond to one of the supported + * values, then the default value will be ``NotSet``. + * For more information about how access log behaves and when it is being recorded, + * please refer to :ref:`access logging `. + */ + 'access_log_type'?: (_envoy_data_accesslog_v3_AccessLogType); +} + +/** + * Defines fields that are shared by all Envoy access logs. + * [#next-free-field: 34] + */ +export interface AccessLogCommon__Output { + /** + * [#not-implemented-hide:] + * This field indicates the rate at which this log entry was sampled. + * Valid range is (0.0, 1.0]. + */ + 'sample_rate': (number); + /** + * This field is the remote/origin address on which the request from the user was received. + * Note: This may not be the physical peer. E.g, if the remote address is inferred from for + * example the x-forwarder-for header, proxy protocol, etc. + */ + 'downstream_remote_address': (_envoy_config_core_v3_Address__Output | null); + /** + * This field is the local/destination address on which the request from the user was received. 
+ */ + 'downstream_local_address': (_envoy_config_core_v3_Address__Output | null); + /** + * If the connection is secure,S this field will contain TLS properties. + */ + 'tls_properties': (_envoy_data_accesslog_v3_TLSProperties__Output | null); + /** + * The time that Envoy started servicing this request. This is effectively the time that the first + * downstream byte is received. + */ + 'start_time': (_google_protobuf_Timestamp__Output | null); + /** + * Interval between the first downstream byte received and the last + * downstream byte received (i.e. time it takes to receive a request). + */ + 'time_to_last_rx_byte': (_google_protobuf_Duration__Output | null); + /** + * Interval between the first downstream byte received and the first upstream byte sent. There may + * by considerable delta between ``time_to_last_rx_byte`` and this value due to filters. + * Additionally, the same caveats apply as documented in ``time_to_last_downstream_tx_byte`` about + * not accounting for kernel socket buffer time, etc. + */ + 'time_to_first_upstream_tx_byte': (_google_protobuf_Duration__Output | null); + /** + * Interval between the first downstream byte received and the last upstream byte sent. There may + * by considerable delta between ``time_to_last_rx_byte`` and this value due to filters. + * Additionally, the same caveats apply as documented in ``time_to_last_downstream_tx_byte`` about + * not accounting for kernel socket buffer time, etc. + */ + 'time_to_last_upstream_tx_byte': (_google_protobuf_Duration__Output | null); + /** + * Interval between the first downstream byte received and the first upstream + * byte received (i.e. time it takes to start receiving a response). + */ + 'time_to_first_upstream_rx_byte': (_google_protobuf_Duration__Output | null); + /** + * Interval between the first downstream byte received and the last upstream + * byte received (i.e. time it takes to receive a complete response). + */ + 'time_to_last_upstream_rx_byte': (_google_protobuf_Duration__Output | null); + /** + * Interval between the first downstream byte received and the first downstream byte sent. + * There may be a considerable delta between the ``time_to_first_upstream_rx_byte`` and this field + * due to filters. Additionally, the same caveats apply as documented in + * ``time_to_last_downstream_tx_byte`` about not accounting for kernel socket buffer time, etc. + */ + 'time_to_first_downstream_tx_byte': (_google_protobuf_Duration__Output | null); + /** + * Interval between the first downstream byte received and the last downstream byte sent. + * Depending on protocol, buffering, windowing, filters, etc. there may be a considerable delta + * between ``time_to_last_upstream_rx_byte`` and this field. Note also that this is an approximate + * time. In the current implementation it does not include kernel socket buffer time. In the + * current implementation it also does not include send window buffering inside the HTTP/2 codec. + * In the future it is likely that work will be done to make this duration more accurate. + */ + 'time_to_last_downstream_tx_byte': (_google_protobuf_Duration__Output | null); + /** + * The upstream remote/destination address that handles this exchange. This does not include + * retries. + */ + 'upstream_remote_address': (_envoy_config_core_v3_Address__Output | null); + /** + * The upstream local/origin address that handles this exchange. This does not include retries. 
+ */ + 'upstream_local_address': (_envoy_config_core_v3_Address__Output | null); + /** + * The upstream cluster that ``upstream_remote_address`` belongs to. + */ + 'upstream_cluster': (string); + /** + * Flags indicating occurrences during request/response processing. + */ + 'response_flags': (_envoy_data_accesslog_v3_ResponseFlags__Output | null); + /** + * All metadata encountered during request processing, including endpoint + * selection. + * + * This can be used to associate IDs attached to the various configurations + * used to process this request with the access log entry. For example, a + * route created from a higher level forwarding rule with some ID can place + * that ID in this field and cross reference later. It can also be used to + * determine if a canary endpoint was used or not. + */ + 'metadata': (_envoy_config_core_v3_Metadata__Output | null); + /** + * If upstream connection failed due to transport socket (e.g. TLS handshake), provides the + * failure reason from the transport socket. The format of this field depends on the configured + * upstream transport socket. Common TLS failures are in + * :ref:`TLS trouble shooting `. + */ + 'upstream_transport_failure_reason': (string); + /** + * The name of the route + */ + 'route_name': (string); + /** + * This field is the downstream direct remote address on which the request from the user was + * received. Note: This is always the physical peer, even if the remote address is inferred from + * for example the x-forwarder-for header, proxy protocol, etc. + */ + 'downstream_direct_remote_address': (_envoy_config_core_v3_Address__Output | null); + /** + * Map of filter state in stream info that have been configured to be logged. If the filter + * state serialized to any message other than ``google.protobuf.Any`` it will be packed into + * ``google.protobuf.Any``. + */ + 'filter_state_objects': ({[key: string]: _google_protobuf_Any__Output}); + /** + * A list of custom tags, which annotate logs with additional information. + * To configure this value, users should configure + * :ref:`custom_tags `. + */ + 'custom_tags': ({[key: string]: string}); + /** + * For HTTP: Total duration in milliseconds of the request from the start time to the last byte out. + * For TCP: Total duration in milliseconds of the downstream connection. + * This is the total duration of the request (i.e., when the request's ActiveStream is destroyed) + * and may be longer than ``time_to_last_downstream_tx_byte``. + */ + 'duration': (_google_protobuf_Duration__Output | null); + /** + * For HTTP: Number of times the request is attempted upstream. Note that the field is omitted when the request was never attempted upstream. + * For TCP: Number of times the connection request is attempted upstream. Note that the field is omitted when the connect request was never attempted upstream. + */ + 'upstream_request_attempt_count': (number); + /** + * Connection termination details may provide additional information about why the connection was terminated by Envoy for L4 reasons. + */ + 'connection_termination_details': (string); + /** + * Optional unique id of stream (TCP connection, long-live HTTP2 stream, HTTP request) for logging and tracing. + * This could be any format string that could be used to identify one stream. + */ + 'stream_id': (string); + /** + * If this log entry is final log entry that flushed after the stream completed or + * intermediate log entry that flushed periodically during the stream. 
+ * There may be multiple intermediate log entries and only one final log entry for each + * long-live stream (TCP connection, long-live HTTP2 stream). + * And if it is necessary, unique ID or identifier can be added to the log entry + * :ref:`stream_id ` to + * correlate all these intermediate log entries and final log entry. + * + * .. attention:: + * + * This field is deprecated in favor of ``access_log_type`` for better indication of the + * type of the access log record. + * @deprecated + */ + 'intermediate_log_entry': (boolean); + /** + * If downstream connection in listener failed due to transport socket (e.g. TLS handshake), provides the + * failure reason from the transport socket. The format of this field depends on the configured downstream + * transport socket. Common TLS failures are in :ref:`TLS trouble shooting `. + */ + 'downstream_transport_failure_reason': (string); + /** + * For HTTP: Total number of bytes sent to the downstream by the http stream. + * For TCP: Total number of bytes sent to the downstream by the tcp proxy. + */ + 'downstream_wire_bytes_sent': (string); + /** + * For HTTP: Total number of bytes received from the downstream by the http stream. Envoy over counts sizes of received HTTP/1.1 pipelined requests by adding up bytes of requests in the pipeline to the one currently being processed. + * For TCP: Total number of bytes received from the downstream by the tcp proxy. + */ + 'downstream_wire_bytes_received': (string); + /** + * For HTTP: Total number of bytes sent to the upstream by the http stream. This value accumulates during upstream retries. + * For TCP: Total number of bytes sent to the upstream by the tcp proxy. + */ + 'upstream_wire_bytes_sent': (string); + /** + * For HTTP: Total number of bytes received from the upstream by the http stream. + * For TCP: Total number of bytes sent to the upstream by the tcp proxy. + */ + 'upstream_wire_bytes_received': (string); + /** + * The type of the access log, which indicates when the log was recorded. + * See :ref:`ACCESS_LOG_TYPE ` for the available values. + * In case the access log was recorded by a flow which does not correspond to one of the supported + * values, then the default value will be ``NotSet``. + * For more information about how access log behaves and when it is being recorded, + * please refer to :ref:`access logging `. 
+ */ + 'access_log_type': (_envoy_data_accesslog_v3_AccessLogType__Output); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/data/accesslog/v3/AccessLogType.ts b/packages/grpc-js-xds/src/generated/envoy/data/accesslog/v3/AccessLogType.ts new file mode 100644 index 000000000..29ee32f5a --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/data/accesslog/v3/AccessLogType.ts @@ -0,0 +1,41 @@ +// Original file: deps/envoy-api/envoy/data/accesslog/v3/accesslog.proto + +export const AccessLogType = { + NotSet: 'NotSet', + TcpUpstreamConnected: 'TcpUpstreamConnected', + TcpPeriodic: 'TcpPeriodic', + TcpConnectionEnd: 'TcpConnectionEnd', + DownstreamStart: 'DownstreamStart', + DownstreamPeriodic: 'DownstreamPeriodic', + DownstreamEnd: 'DownstreamEnd', + UpstreamPoolReady: 'UpstreamPoolReady', + UpstreamPeriodic: 'UpstreamPeriodic', + UpstreamEnd: 'UpstreamEnd', + DownstreamTunnelSuccessfullyEstablished: 'DownstreamTunnelSuccessfullyEstablished', +} as const; + +export type AccessLogType = + | 'NotSet' + | 0 + | 'TcpUpstreamConnected' + | 1 + | 'TcpPeriodic' + | 2 + | 'TcpConnectionEnd' + | 3 + | 'DownstreamStart' + | 4 + | 'DownstreamPeriodic' + | 5 + | 'DownstreamEnd' + | 6 + | 'UpstreamPoolReady' + | 7 + | 'UpstreamPeriodic' + | 8 + | 'UpstreamEnd' + | 9 + | 'DownstreamTunnelSuccessfullyEstablished' + | 10 + +export type AccessLogType__Output = typeof AccessLogType[keyof typeof AccessLogType] diff --git a/packages/grpc-js-xds/src/generated/envoy/data/accesslog/v3/ConnectionProperties.ts b/packages/grpc-js-xds/src/generated/envoy/data/accesslog/v3/ConnectionProperties.ts new file mode 100644 index 000000000..a0cdfc75f --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/data/accesslog/v3/ConnectionProperties.ts @@ -0,0 +1,31 @@ +// Original file: deps/envoy-api/envoy/data/accesslog/v3/accesslog.proto + +import type { Long } from '@grpc/proto-loader'; + +/** + * Defines fields for a connection + */ +export interface ConnectionProperties { + /** + * Number of bytes received from downstream. + */ + 'received_bytes'?: (number | string | Long); + /** + * Number of bytes sent to downstream. + */ + 'sent_bytes'?: (number | string | Long); +} + +/** + * Defines fields for a connection + */ +export interface ConnectionProperties__Output { + /** + * Number of bytes received from downstream. + */ + 'received_bytes': (string); + /** + * Number of bytes sent to downstream. 
+ */ + 'sent_bytes': (string); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/data/accesslog/v3/HTTPAccessLogEntry.ts b/packages/grpc-js-xds/src/generated/envoy/data/accesslog/v3/HTTPAccessLogEntry.ts new file mode 100644 index 000000000..31daac364 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/data/accesslog/v3/HTTPAccessLogEntry.ts @@ -0,0 +1,70 @@ +// Original file: deps/envoy-api/envoy/data/accesslog/v3/accesslog.proto + +import type { AccessLogCommon as _envoy_data_accesslog_v3_AccessLogCommon, AccessLogCommon__Output as _envoy_data_accesslog_v3_AccessLogCommon__Output } from '../../../../envoy/data/accesslog/v3/AccessLogCommon'; +import type { HTTPRequestProperties as _envoy_data_accesslog_v3_HTTPRequestProperties, HTTPRequestProperties__Output as _envoy_data_accesslog_v3_HTTPRequestProperties__Output } from '../../../../envoy/data/accesslog/v3/HTTPRequestProperties'; +import type { HTTPResponseProperties as _envoy_data_accesslog_v3_HTTPResponseProperties, HTTPResponseProperties__Output as _envoy_data_accesslog_v3_HTTPResponseProperties__Output } from '../../../../envoy/data/accesslog/v3/HTTPResponseProperties'; + +// Original file: deps/envoy-api/envoy/data/accesslog/v3/accesslog.proto + +/** + * HTTP version + */ +export const _envoy_data_accesslog_v3_HTTPAccessLogEntry_HTTPVersion = { + PROTOCOL_UNSPECIFIED: 'PROTOCOL_UNSPECIFIED', + HTTP10: 'HTTP10', + HTTP11: 'HTTP11', + HTTP2: 'HTTP2', + HTTP3: 'HTTP3', +} as const; + +/** + * HTTP version + */ +export type _envoy_data_accesslog_v3_HTTPAccessLogEntry_HTTPVersion = + | 'PROTOCOL_UNSPECIFIED' + | 0 + | 'HTTP10' + | 1 + | 'HTTP11' + | 2 + | 'HTTP2' + | 3 + | 'HTTP3' + | 4 + +/** + * HTTP version + */ +export type _envoy_data_accesslog_v3_HTTPAccessLogEntry_HTTPVersion__Output = typeof _envoy_data_accesslog_v3_HTTPAccessLogEntry_HTTPVersion[keyof typeof _envoy_data_accesslog_v3_HTTPAccessLogEntry_HTTPVersion] + +export interface HTTPAccessLogEntry { + /** + * Common properties shared by all Envoy access logs. + */ + 'common_properties'?: (_envoy_data_accesslog_v3_AccessLogCommon | null); + 'protocol_version'?: (_envoy_data_accesslog_v3_HTTPAccessLogEntry_HTTPVersion); + /** + * Description of the incoming HTTP request. + */ + 'request'?: (_envoy_data_accesslog_v3_HTTPRequestProperties | null); + /** + * Description of the outgoing HTTP response. + */ + 'response'?: (_envoy_data_accesslog_v3_HTTPResponseProperties | null); +} + +export interface HTTPAccessLogEntry__Output { + /** + * Common properties shared by all Envoy access logs. + */ + 'common_properties': (_envoy_data_accesslog_v3_AccessLogCommon__Output | null); + 'protocol_version': (_envoy_data_accesslog_v3_HTTPAccessLogEntry_HTTPVersion__Output); + /** + * Description of the incoming HTTP request. + */ + 'request': (_envoy_data_accesslog_v3_HTTPRequestProperties__Output | null); + /** + * Description of the outgoing HTTP response. 
+ */ + 'response': (_envoy_data_accesslog_v3_HTTPResponseProperties__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/data/accesslog/v3/HTTPRequestProperties.ts b/packages/grpc-js-xds/src/generated/envoy/data/accesslog/v3/HTTPRequestProperties.ts new file mode 100644 index 000000000..f1271ba16 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/data/accesslog/v3/HTTPRequestProperties.ts @@ -0,0 +1,163 @@ +// Original file: deps/envoy-api/envoy/data/accesslog/v3/accesslog.proto + +import type { RequestMethod as _envoy_config_core_v3_RequestMethod, RequestMethod__Output as _envoy_config_core_v3_RequestMethod__Output } from '../../../../envoy/config/core/v3/RequestMethod'; +import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; +import type { Long } from '@grpc/proto-loader'; + +/** + * [#next-free-field: 16] + */ +export interface HTTPRequestProperties { + /** + * The request method (RFC 7231/2616). + */ + 'request_method'?: (_envoy_config_core_v3_RequestMethod); + /** + * The scheme portion of the incoming request URI. + */ + 'scheme'?: (string); + /** + * HTTP/2 ``:authority`` or HTTP/1.1 ``Host`` header value. + */ + 'authority'?: (string); + /** + * The port of the incoming request URI + * (unused currently, as port is composed onto authority). + */ + 'port'?: (_google_protobuf_UInt32Value | null); + /** + * The path portion from the incoming request URI. + */ + 'path'?: (string); + /** + * Value of the ``User-Agent`` request header. + */ + 'user_agent'?: (string); + /** + * Value of the ``Referer`` request header. + */ + 'referer'?: (string); + /** + * Value of the ``X-Forwarded-For`` request header. + */ + 'forwarded_for'?: (string); + /** + * Value of the ``X-Request-Id`` request header + * + * This header is used by Envoy to uniquely identify a request. + * It will be generated for all external requests and internal requests that + * do not already have a request ID. + */ + 'request_id'?: (string); + /** + * Value of the ``X-Envoy-Original-Path`` request header. + */ + 'original_path'?: (string); + /** + * Size of the HTTP request headers in bytes. + * + * This value is captured from the OSI layer 7 perspective, i.e. it does not + * include overhead from framing or encoding at other networking layers. + */ + 'request_headers_bytes'?: (number | string | Long); + /** + * Size of the HTTP request body in bytes. + * + * This value is captured from the OSI layer 7 perspective, i.e. it does not + * include overhead from framing or encoding at other networking layers. + */ + 'request_body_bytes'?: (number | string | Long); + /** + * Map of additional headers that have been configured to be logged. + */ + 'request_headers'?: ({[key: string]: string}); + /** + * Number of header bytes sent to the upstream by the http stream, including protocol overhead. + * + * This value accumulates during upstream retries. + */ + 'upstream_header_bytes_sent'?: (number | string | Long); + /** + * Number of header bytes received from the downstream by the http stream, including protocol overhead. + */ + 'downstream_header_bytes_received'?: (number | string | Long); +} + +/** + * [#next-free-field: 16] + */ +export interface HTTPRequestProperties__Output { + /** + * The request method (RFC 7231/2616). + */ + 'request_method': (_envoy_config_core_v3_RequestMethod__Output); + /** + * The scheme portion of the incoming request URI. 
+ */ + 'scheme': (string); + /** + * HTTP/2 ``:authority`` or HTTP/1.1 ``Host`` header value. + */ + 'authority': (string); + /** + * The port of the incoming request URI + * (unused currently, as port is composed onto authority). + */ + 'port': (_google_protobuf_UInt32Value__Output | null); + /** + * The path portion from the incoming request URI. + */ + 'path': (string); + /** + * Value of the ``User-Agent`` request header. + */ + 'user_agent': (string); + /** + * Value of the ``Referer`` request header. + */ + 'referer': (string); + /** + * Value of the ``X-Forwarded-For`` request header. + */ + 'forwarded_for': (string); + /** + * Value of the ``X-Request-Id`` request header + * + * This header is used by Envoy to uniquely identify a request. + * It will be generated for all external requests and internal requests that + * do not already have a request ID. + */ + 'request_id': (string); + /** + * Value of the ``X-Envoy-Original-Path`` request header. + */ + 'original_path': (string); + /** + * Size of the HTTP request headers in bytes. + * + * This value is captured from the OSI layer 7 perspective, i.e. it does not + * include overhead from framing or encoding at other networking layers. + */ + 'request_headers_bytes': (string); + /** + * Size of the HTTP request body in bytes. + * + * This value is captured from the OSI layer 7 perspective, i.e. it does not + * include overhead from framing or encoding at other networking layers. + */ + 'request_body_bytes': (string); + /** + * Map of additional headers that have been configured to be logged. + */ + 'request_headers': ({[key: string]: string}); + /** + * Number of header bytes sent to the upstream by the http stream, including protocol overhead. + * + * This value accumulates during upstream retries. + */ + 'upstream_header_bytes_sent': (string); + /** + * Number of header bytes received from the downstream by the http stream, including protocol overhead. + */ + 'downstream_header_bytes_received': (string); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/data/accesslog/v3/HTTPResponseProperties.ts b/packages/grpc-js-xds/src/generated/envoy/data/accesslog/v3/HTTPResponseProperties.ts new file mode 100644 index 000000000..1368fc8cd --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/data/accesslog/v3/HTTPResponseProperties.ts @@ -0,0 +1,92 @@ +// Original file: deps/envoy-api/envoy/data/accesslog/v3/accesslog.proto + +import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; +import type { Long } from '@grpc/proto-loader'; + +/** + * [#next-free-field: 9] + */ +export interface HTTPResponseProperties { + /** + * The HTTP response code returned by Envoy. + */ + 'response_code'?: (_google_protobuf_UInt32Value | null); + /** + * Size of the HTTP response headers in bytes. + * + * This value is captured from the OSI layer 7 perspective, i.e. it does not + * include protocol overhead or overhead from framing or encoding at other networking layers. + */ + 'response_headers_bytes'?: (number | string | Long); + /** + * Size of the HTTP response body in bytes. + * + * This value is captured from the OSI layer 7 perspective, i.e. it does not + * include overhead from framing or encoding at other networking layers. + */ + 'response_body_bytes'?: (number | string | Long); + /** + * Map of additional headers configured to be logged. 
+ */ + 'response_headers'?: ({[key: string]: string}); + /** + * Map of trailers configured to be logged. + */ + 'response_trailers'?: ({[key: string]: string}); + /** + * The HTTP response code details. + */ + 'response_code_details'?: (string); + /** + * Number of header bytes received from the upstream by the http stream, including protocol overhead. + */ + 'upstream_header_bytes_received'?: (number | string | Long); + /** + * Number of header bytes sent to the downstream by the http stream, including protocol overhead. + */ + 'downstream_header_bytes_sent'?: (number | string | Long); +} + +/** + * [#next-free-field: 9] + */ +export interface HTTPResponseProperties__Output { + /** + * The HTTP response code returned by Envoy. + */ + 'response_code': (_google_protobuf_UInt32Value__Output | null); + /** + * Size of the HTTP response headers in bytes. + * + * This value is captured from the OSI layer 7 perspective, i.e. it does not + * include protocol overhead or overhead from framing or encoding at other networking layers. + */ + 'response_headers_bytes': (string); + /** + * Size of the HTTP response body in bytes. + * + * This value is captured from the OSI layer 7 perspective, i.e. it does not + * include overhead from framing or encoding at other networking layers. + */ + 'response_body_bytes': (string); + /** + * Map of additional headers configured to be logged. + */ + 'response_headers': ({[key: string]: string}); + /** + * Map of trailers configured to be logged. + */ + 'response_trailers': ({[key: string]: string}); + /** + * The HTTP response code details. + */ + 'response_code_details': (string); + /** + * Number of header bytes received from the upstream by the http stream, including protocol overhead. + */ + 'upstream_header_bytes_received': (string); + /** + * Number of header bytes sent to the downstream by the http stream, including protocol overhead. + */ + 'downstream_header_bytes_sent': (string); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/data/accesslog/v3/ResponseFlags.ts b/packages/grpc-js-xds/src/generated/envoy/data/accesslog/v3/ResponseFlags.ts new file mode 100644 index 000000000..f42e11ee3 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/data/accesslog/v3/ResponseFlags.ts @@ -0,0 +1,272 @@ +// Original file: deps/envoy-api/envoy/data/accesslog/v3/accesslog.proto + + +// Original file: deps/envoy-api/envoy/data/accesslog/v3/accesslog.proto + +/** + * Reasons why the request was unauthorized + */ +export const _envoy_data_accesslog_v3_ResponseFlags_Unauthorized_Reason = { + REASON_UNSPECIFIED: 'REASON_UNSPECIFIED', + /** + * The request was denied by the external authorization service. + */ + EXTERNAL_SERVICE: 'EXTERNAL_SERVICE', +} as const; + +/** + * Reasons why the request was unauthorized + */ +export type _envoy_data_accesslog_v3_ResponseFlags_Unauthorized_Reason = + | 'REASON_UNSPECIFIED' + | 0 + /** + * The request was denied by the external authorization service. 
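// A minimal usage sketch for the HTTPRequestProperties and HTTPResponseProperties
// types above: building an access-log record by hand. The relative import paths,
// the 'GET' RequestMethod literal, and the concrete values are illustrative assumptions.
import type { HTTPRequestProperties } from './generated/envoy/data/accesslog/v3/HTTPRequestProperties';
import type { HTTPResponseProperties } from './generated/envoy/data/accesslog/v3/HTTPResponseProperties';

const request: HTTPRequestProperties = {
  request_method: 'GET',                      // assumed to be a valid RequestMethod literal
  scheme: 'https',
  authority: 'example.com',
  path: '/status',
  request_headers_bytes: 512,                 // Long-valued fields also accept number | string
  request_headers: { 'x-request-id': 'abc-123' },
};

const response: HTTPResponseProperties = {
  response_code: { value: 200 },              // google.protobuf.UInt32Value wrapper
  response_code_details: 'via_upstream',
  response_body_bytes: 1024,
};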
+ */ + | 'EXTERNAL_SERVICE' + | 1 + +/** + * Reasons why the request was unauthorized + */ +export type _envoy_data_accesslog_v3_ResponseFlags_Unauthorized_Reason__Output = typeof _envoy_data_accesslog_v3_ResponseFlags_Unauthorized_Reason[keyof typeof _envoy_data_accesslog_v3_ResponseFlags_Unauthorized_Reason] + +export interface _envoy_data_accesslog_v3_ResponseFlags_Unauthorized { + 'reason'?: (_envoy_data_accesslog_v3_ResponseFlags_Unauthorized_Reason); +} + +export interface _envoy_data_accesslog_v3_ResponseFlags_Unauthorized__Output { + 'reason': (_envoy_data_accesslog_v3_ResponseFlags_Unauthorized_Reason__Output); +} + +/** + * Flags indicating occurrences during request/response processing. + * [#next-free-field: 28] + */ +export interface ResponseFlags { + /** + * Indicates local server healthcheck failed. + */ + 'failed_local_healthcheck'?: (boolean); + /** + * Indicates there was no healthy upstream. + */ + 'no_healthy_upstream'?: (boolean); + /** + * Indicates an there was an upstream request timeout. + */ + 'upstream_request_timeout'?: (boolean); + /** + * Indicates local codec level reset was sent on the stream. + */ + 'local_reset'?: (boolean); + /** + * Indicates remote codec level reset was received on the stream. + */ + 'upstream_remote_reset'?: (boolean); + /** + * Indicates there was a local reset by a connection pool due to an initial connection failure. + */ + 'upstream_connection_failure'?: (boolean); + /** + * Indicates the stream was reset due to an upstream connection termination. + */ + 'upstream_connection_termination'?: (boolean); + /** + * Indicates the stream was reset because of a resource overflow. + */ + 'upstream_overflow'?: (boolean); + /** + * Indicates no route was found for the request. + */ + 'no_route_found'?: (boolean); + /** + * Indicates that the request was delayed before proxying. + */ + 'delay_injected'?: (boolean); + /** + * Indicates that the request was aborted with an injected error code. + */ + 'fault_injected'?: (boolean); + /** + * Indicates that the request was rate-limited locally. + */ + 'rate_limited'?: (boolean); + /** + * Indicates if the request was deemed unauthorized and the reason for it. + */ + 'unauthorized_details'?: (_envoy_data_accesslog_v3_ResponseFlags_Unauthorized | null); + /** + * Indicates that the request was rejected because there was an error in rate limit service. + */ + 'rate_limit_service_error'?: (boolean); + /** + * Indicates the stream was reset due to a downstream connection termination. + */ + 'downstream_connection_termination'?: (boolean); + /** + * Indicates that the upstream retry limit was exceeded, resulting in a downstream error. + */ + 'upstream_retry_limit_exceeded'?: (boolean); + /** + * Indicates that the stream idle timeout was hit, resulting in a downstream 408. + */ + 'stream_idle_timeout'?: (boolean); + /** + * Indicates that the request was rejected because an envoy request header failed strict + * validation. + */ + 'invalid_envoy_request_headers'?: (boolean); + /** + * Indicates there was an HTTP protocol error on the downstream request. + */ + 'downstream_protocol_error'?: (boolean); + /** + * Indicates there was a max stream duration reached on the upstream request. + */ + 'upstream_max_stream_duration_reached'?: (boolean); + /** + * Indicates the response was served from a cache filter. + */ + 'response_from_cache_filter'?: (boolean); + /** + * Indicates that a filter configuration is not available. 
+ */ + 'no_filter_config_found'?: (boolean); + /** + * Indicates that request or connection exceeded the downstream connection duration. + */ + 'duration_timeout'?: (boolean); + /** + * Indicates there was an HTTP protocol error in the upstream response. + */ + 'upstream_protocol_error'?: (boolean); + /** + * Indicates no cluster was found for the request. + */ + 'no_cluster_found'?: (boolean); + /** + * Indicates overload manager terminated the request. + */ + 'overload_manager'?: (boolean); + /** + * Indicates a DNS resolution failed. + */ + 'dns_resolution_failure'?: (boolean); +} + +/** + * Flags indicating occurrences during request/response processing. + * [#next-free-field: 28] + */ +export interface ResponseFlags__Output { + /** + * Indicates local server healthcheck failed. + */ + 'failed_local_healthcheck': (boolean); + /** + * Indicates there was no healthy upstream. + */ + 'no_healthy_upstream': (boolean); + /** + * Indicates an there was an upstream request timeout. + */ + 'upstream_request_timeout': (boolean); + /** + * Indicates local codec level reset was sent on the stream. + */ + 'local_reset': (boolean); + /** + * Indicates remote codec level reset was received on the stream. + */ + 'upstream_remote_reset': (boolean); + /** + * Indicates there was a local reset by a connection pool due to an initial connection failure. + */ + 'upstream_connection_failure': (boolean); + /** + * Indicates the stream was reset due to an upstream connection termination. + */ + 'upstream_connection_termination': (boolean); + /** + * Indicates the stream was reset because of a resource overflow. + */ + 'upstream_overflow': (boolean); + /** + * Indicates no route was found for the request. + */ + 'no_route_found': (boolean); + /** + * Indicates that the request was delayed before proxying. + */ + 'delay_injected': (boolean); + /** + * Indicates that the request was aborted with an injected error code. + */ + 'fault_injected': (boolean); + /** + * Indicates that the request was rate-limited locally. + */ + 'rate_limited': (boolean); + /** + * Indicates if the request was deemed unauthorized and the reason for it. + */ + 'unauthorized_details': (_envoy_data_accesslog_v3_ResponseFlags_Unauthorized__Output | null); + /** + * Indicates that the request was rejected because there was an error in rate limit service. + */ + 'rate_limit_service_error': (boolean); + /** + * Indicates the stream was reset due to a downstream connection termination. + */ + 'downstream_connection_termination': (boolean); + /** + * Indicates that the upstream retry limit was exceeded, resulting in a downstream error. + */ + 'upstream_retry_limit_exceeded': (boolean); + /** + * Indicates that the stream idle timeout was hit, resulting in a downstream 408. + */ + 'stream_idle_timeout': (boolean); + /** + * Indicates that the request was rejected because an envoy request header failed strict + * validation. + */ + 'invalid_envoy_request_headers': (boolean); + /** + * Indicates there was an HTTP protocol error on the downstream request. + */ + 'downstream_protocol_error': (boolean); + /** + * Indicates there was a max stream duration reached on the upstream request. + */ + 'upstream_max_stream_duration_reached': (boolean); + /** + * Indicates the response was served from a cache filter. + */ + 'response_from_cache_filter': (boolean); + /** + * Indicates that a filter configuration is not available. 
+ */ + 'no_filter_config_found': (boolean); + /** + * Indicates that request or connection exceeded the downstream connection duration. + */ + 'duration_timeout': (boolean); + /** + * Indicates there was an HTTP protocol error in the upstream response. + */ + 'upstream_protocol_error': (boolean); + /** + * Indicates no cluster was found for the request. + */ + 'no_cluster_found': (boolean); + /** + * Indicates overload manager terminated the request. + */ + 'overload_manager': (boolean); + /** + * Indicates a DNS resolution failed. + */ + 'dns_resolution_failure': (boolean); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/data/accesslog/v3/TCPAccessLogEntry.ts b/packages/grpc-js-xds/src/generated/envoy/data/accesslog/v3/TCPAccessLogEntry.ts new file mode 100644 index 000000000..55e9cace0 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/data/accesslog/v3/TCPAccessLogEntry.ts @@ -0,0 +1,26 @@ +// Original file: deps/envoy-api/envoy/data/accesslog/v3/accesslog.proto + +import type { AccessLogCommon as _envoy_data_accesslog_v3_AccessLogCommon, AccessLogCommon__Output as _envoy_data_accesslog_v3_AccessLogCommon__Output } from '../../../../envoy/data/accesslog/v3/AccessLogCommon'; +import type { ConnectionProperties as _envoy_data_accesslog_v3_ConnectionProperties, ConnectionProperties__Output as _envoy_data_accesslog_v3_ConnectionProperties__Output } from '../../../../envoy/data/accesslog/v3/ConnectionProperties'; + +export interface TCPAccessLogEntry { + /** + * Common properties shared by all Envoy access logs. + */ + 'common_properties'?: (_envoy_data_accesslog_v3_AccessLogCommon | null); + /** + * Properties of the TCP connection. + */ + 'connection_properties'?: (_envoy_data_accesslog_v3_ConnectionProperties | null); +} + +export interface TCPAccessLogEntry__Output { + /** + * Common properties shared by all Envoy access logs. + */ + 'common_properties': (_envoy_data_accesslog_v3_AccessLogCommon__Output | null); + /** + * Properties of the TCP connection. + */ + 'connection_properties': (_envoy_data_accesslog_v3_ConnectionProperties__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/data/accesslog/v3/TLSProperties.ts b/packages/grpc-js-xds/src/generated/envoy/data/accesslog/v3/TLSProperties.ts new file mode 100644 index 000000000..ddeb9a1ae --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/data/accesslog/v3/TLSProperties.ts @@ -0,0 +1,145 @@ +// Original file: deps/envoy-api/envoy/data/accesslog/v3/accesslog.proto + +import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; + +export interface _envoy_data_accesslog_v3_TLSProperties_CertificateProperties { + /** + * SANs present in the certificate. + */ + 'subject_alt_name'?: (_envoy_data_accesslog_v3_TLSProperties_CertificateProperties_SubjectAltName)[]; + /** + * The subject field of the certificate. + */ + 'subject'?: (string); +} + +export interface _envoy_data_accesslog_v3_TLSProperties_CertificateProperties__Output { + /** + * SANs present in the certificate. + */ + 'subject_alt_name': (_envoy_data_accesslog_v3_TLSProperties_CertificateProperties_SubjectAltName__Output)[]; + /** + * The subject field of the certificate. 
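// A minimal sketch of inspecting the decoded ResponseFlags__Output of a received
// access log entry to summarize why a request failed; the import path is an
// assumed relative location for illustration.
import type { ResponseFlags__Output } from './generated/envoy/data/accesslog/v3/ResponseFlags';

function describeFailure(flags: ResponseFlags__Output): string[] {
  const reasons: string[] = [];
  if (flags.no_healthy_upstream) reasons.push('no healthy upstream');
  if (flags.upstream_request_timeout) reasons.push('upstream request timeout');
  if (flags.upstream_retry_limit_exceeded) reasons.push('upstream retry limit exceeded');
  if (flags.fault_injected) reasons.push('fault injected');
  if (flags.unauthorized_details?.reason === 'EXTERNAL_SERVICE') {
    reasons.push('denied by the external authorization service');
  }
  return reasons;
}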
+ */ + 'subject': (string); +} + +export interface _envoy_data_accesslog_v3_TLSProperties_CertificateProperties_SubjectAltName { + 'uri'?: (string); + /** + * [#not-implemented-hide:] + */ + 'dns'?: (string); + 'san'?: "uri"|"dns"; +} + +export interface _envoy_data_accesslog_v3_TLSProperties_CertificateProperties_SubjectAltName__Output { + 'uri'?: (string); + /** + * [#not-implemented-hide:] + */ + 'dns'?: (string); + 'san': "uri"|"dns"; +} + +// Original file: deps/envoy-api/envoy/data/accesslog/v3/accesslog.proto + +export const _envoy_data_accesslog_v3_TLSProperties_TLSVersion = { + VERSION_UNSPECIFIED: 'VERSION_UNSPECIFIED', + TLSv1: 'TLSv1', + TLSv1_1: 'TLSv1_1', + TLSv1_2: 'TLSv1_2', + TLSv1_3: 'TLSv1_3', +} as const; + +export type _envoy_data_accesslog_v3_TLSProperties_TLSVersion = + | 'VERSION_UNSPECIFIED' + | 0 + | 'TLSv1' + | 1 + | 'TLSv1_1' + | 2 + | 'TLSv1_2' + | 3 + | 'TLSv1_3' + | 4 + +export type _envoy_data_accesslog_v3_TLSProperties_TLSVersion__Output = typeof _envoy_data_accesslog_v3_TLSProperties_TLSVersion[keyof typeof _envoy_data_accesslog_v3_TLSProperties_TLSVersion] + +/** + * Properties of a negotiated TLS connection. + * [#next-free-field: 8] + */ +export interface TLSProperties { + /** + * Version of TLS that was negotiated. + */ + 'tls_version'?: (_envoy_data_accesslog_v3_TLSProperties_TLSVersion); + /** + * TLS cipher suite negotiated during handshake. The value is a + * four-digit hex code defined by the IANA TLS Cipher Suite Registry + * (e.g. ``009C`` for ``TLS_RSA_WITH_AES_128_GCM_SHA256``). + * + * Here it is expressed as an integer. + */ + 'tls_cipher_suite'?: (_google_protobuf_UInt32Value | null); + /** + * SNI hostname from handshake. + */ + 'tls_sni_hostname'?: (string); + /** + * Properties of the local certificate used to negotiate TLS. + */ + 'local_certificate_properties'?: (_envoy_data_accesslog_v3_TLSProperties_CertificateProperties | null); + /** + * Properties of the peer certificate used to negotiate TLS. + */ + 'peer_certificate_properties'?: (_envoy_data_accesslog_v3_TLSProperties_CertificateProperties | null); + /** + * The TLS session ID. + */ + 'tls_session_id'?: (string); + /** + * The ``JA3`` fingerprint when ``JA3`` fingerprinting is enabled. + */ + 'ja3_fingerprint'?: (string); +} + +/** + * Properties of a negotiated TLS connection. + * [#next-free-field: 8] + */ +export interface TLSProperties__Output { + /** + * Version of TLS that was negotiated. + */ + 'tls_version': (_envoy_data_accesslog_v3_TLSProperties_TLSVersion__Output); + /** + * TLS cipher suite negotiated during handshake. The value is a + * four-digit hex code defined by the IANA TLS Cipher Suite Registry + * (e.g. ``009C`` for ``TLS_RSA_WITH_AES_128_GCM_SHA256``). + * + * Here it is expressed as an integer. + */ + 'tls_cipher_suite': (_google_protobuf_UInt32Value__Output | null); + /** + * SNI hostname from handshake. + */ + 'tls_sni_hostname': (string); + /** + * Properties of the local certificate used to negotiate TLS. + */ + 'local_certificate_properties': (_envoy_data_accesslog_v3_TLSProperties_CertificateProperties__Output | null); + /** + * Properties of the peer certificate used to negotiate TLS. + */ + 'peer_certificate_properties': (_envoy_data_accesslog_v3_TLSProperties_CertificateProperties__Output | null); + /** + * The TLS session ID. + */ + 'tls_session_id': (string); + /** + * The ``JA3`` fingerprint when ``JA3`` fingerprinting is enabled. 
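// A minimal sketch of a TLSProperties record built from the types above; the
// import path and the concrete values (SPIFFE URI, cipher suite) are
// illustrative assumptions.
import type { TLSProperties } from './generated/envoy/data/accesslog/v3/TLSProperties';

const tls: TLSProperties = {
  tls_version: 'TLSv1_3',
  tls_cipher_suite: { value: 0x1301 },        // IANA code for TLS_AES_128_GCM_SHA256, expressed as an integer
  tls_sni_hostname: 'example.com',
  peer_certificate_properties: {
    subject: 'CN=client.example.com',
    subject_alt_name: [{ uri: 'spiffe://example.com/client', san: 'uri' }],
  },
};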
+ */ + 'ja3_fingerprint': (string); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/extensions/clusters/aggregate/v3/ClusterConfig.ts b/packages/grpc-js-xds/src/generated/envoy/extensions/clusters/aggregate/v3/ClusterConfig.ts new file mode 100644 index 000000000..49245ac7d --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/extensions/clusters/aggregate/v3/ClusterConfig.ts @@ -0,0 +1,28 @@ +// Original file: deps/envoy-api/envoy/extensions/clusters/aggregate/v3/cluster.proto + + +/** + * Configuration for the aggregate cluster. See the :ref:`architecture overview + * ` for more information. + * [#extension: envoy.clusters.aggregate] + */ +export interface ClusterConfig { + /** + * Load balancing clusters in aggregate cluster. Clusters are prioritized based on the order they + * appear in this list. + */ + 'clusters'?: (string)[]; +} + +/** + * Configuration for the aggregate cluster. See the :ref:`architecture overview + * ` for more information. + * [#extension: envoy.clusters.aggregate] + */ +export interface ClusterConfig__Output { + /** + * Load balancing clusters in aggregate cluster. Clusters are prioritized based on the order they + * appear in this list. + */ + 'clusters': (string)[]; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/extensions/filters/common/fault/v3/FaultDelay.ts b/packages/grpc-js-xds/src/generated/envoy/extensions/filters/common/fault/v3/FaultDelay.ts new file mode 100644 index 000000000..e070ae913 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/extensions/filters/common/fault/v3/FaultDelay.ts @@ -0,0 +1,88 @@ +// Original file: deps/envoy-api/envoy/extensions/filters/common/fault/v3/fault.proto + +import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../../../../google/protobuf/Duration'; +import type { FractionalPercent as _envoy_type_v3_FractionalPercent, FractionalPercent__Output as _envoy_type_v3_FractionalPercent__Output } from '../../../../../../envoy/type/v3/FractionalPercent'; + +// Original file: deps/envoy-api/envoy/extensions/filters/common/fault/v3/fault.proto + +export const _envoy_extensions_filters_common_fault_v3_FaultDelay_FaultDelayType = { + /** + * Unused and deprecated. + */ + FIXED: 'FIXED', +} as const; + +export type _envoy_extensions_filters_common_fault_v3_FaultDelay_FaultDelayType = + /** + * Unused and deprecated. + */ + | 'FIXED' + | 0 + +export type _envoy_extensions_filters_common_fault_v3_FaultDelay_FaultDelayType__Output = typeof _envoy_extensions_filters_common_fault_v3_FaultDelay_FaultDelayType[keyof typeof _envoy_extensions_filters_common_fault_v3_FaultDelay_FaultDelayType] + +/** + * Fault delays are controlled via an HTTP header (if applicable). See the + * :ref:`HTTP fault filter ` documentation for + * more information. + */ +export interface _envoy_extensions_filters_common_fault_v3_FaultDelay_HeaderDelay { +} + +/** + * Fault delays are controlled via an HTTP header (if applicable). See the + * :ref:`HTTP fault filter ` documentation for + * more information. + */ +export interface _envoy_extensions_filters_common_fault_v3_FaultDelay_HeaderDelay__Output { +} + +/** + * Delay specification is used to inject latency into the + * HTTP/Mongo operation. + * [#next-free-field: 6] + */ +export interface FaultDelay { + /** + * Add a fixed delay before forwarding the operation upstream. See + * https://developers.google.com/protocol-buffers/docs/proto3#json for + * the JSON/YAML Duration mapping. 
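// A minimal sketch of the aggregate ClusterConfig type above: clusters are tried
// in the order listed. The cluster names and the import path are illustrative
// assumptions.
import type { ClusterConfig } from './generated/envoy/extensions/clusters/aggregate/v3/ClusterConfig';

const aggregateCluster: ClusterConfig = {
  clusters: ['primary_cluster', 'fallback_cluster'],  // prioritized in list order
};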
For HTTP/Mongo, the specified + * delay will be injected before a new request/operation. + * This is required if type is FIXED. + */ + 'fixed_delay'?: (_google_protobuf_Duration | null); + /** + * The percentage of operations/connections/requests on which the delay will be injected. + */ + 'percentage'?: (_envoy_type_v3_FractionalPercent | null); + /** + * Fault delays are controlled via an HTTP header (if applicable). + */ + 'header_delay'?: (_envoy_extensions_filters_common_fault_v3_FaultDelay_HeaderDelay | null); + 'fault_delay_secifier'?: "fixed_delay"|"header_delay"; +} + +/** + * Delay specification is used to inject latency into the + * HTTP/Mongo operation. + * [#next-free-field: 6] + */ +export interface FaultDelay__Output { + /** + * Add a fixed delay before forwarding the operation upstream. See + * https://developers.google.com/protocol-buffers/docs/proto3#json for + * the JSON/YAML Duration mapping. For HTTP/Mongo, the specified + * delay will be injected before a new request/operation. + * This is required if type is FIXED. + */ + 'fixed_delay'?: (_google_protobuf_Duration__Output | null); + /** + * The percentage of operations/connections/requests on which the delay will be injected. + */ + 'percentage': (_envoy_type_v3_FractionalPercent__Output | null); + /** + * Fault delays are controlled via an HTTP header (if applicable). + */ + 'header_delay'?: (_envoy_extensions_filters_common_fault_v3_FaultDelay_HeaderDelay__Output | null); + 'fault_delay_secifier': "fixed_delay"|"header_delay"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/extensions/filters/common/fault/v3/FaultRateLimit.ts b/packages/grpc-js-xds/src/generated/envoy/extensions/filters/common/fault/v3/FaultRateLimit.ts new file mode 100644 index 000000000..4df7395bb --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/extensions/filters/common/fault/v3/FaultRateLimit.ts @@ -0,0 +1,78 @@ +// Original file: deps/envoy-api/envoy/extensions/filters/common/fault/v3/fault.proto + +import type { FractionalPercent as _envoy_type_v3_FractionalPercent, FractionalPercent__Output as _envoy_type_v3_FractionalPercent__Output } from '../../../../../../envoy/type/v3/FractionalPercent'; +import type { Long } from '@grpc/proto-loader'; + +/** + * Describes a fixed/constant rate limit. + */ +export interface _envoy_extensions_filters_common_fault_v3_FaultRateLimit_FixedLimit { + /** + * The limit supplied in KiB/s. + */ + 'limit_kbps'?: (number | string | Long); +} + +/** + * Describes a fixed/constant rate limit. + */ +export interface _envoy_extensions_filters_common_fault_v3_FaultRateLimit_FixedLimit__Output { + /** + * The limit supplied in KiB/s. + */ + 'limit_kbps': (string); +} + +/** + * Rate limits are controlled via an HTTP header (if applicable). See the + * :ref:`HTTP fault filter ` documentation for + * more information. + */ +export interface _envoy_extensions_filters_common_fault_v3_FaultRateLimit_HeaderLimit { +} + +/** + * Rate limits are controlled via an HTTP header (if applicable). See the + * :ref:`HTTP fault filter ` documentation for + * more information. + */ +export interface _envoy_extensions_filters_common_fault_v3_FaultRateLimit_HeaderLimit__Output { +} + +/** + * Describes a rate limit to be applied. + */ +export interface FaultRateLimit { + /** + * A fixed rate limit. + */ + 'fixed_limit'?: (_envoy_extensions_filters_common_fault_v3_FaultRateLimit_FixedLimit | null); + /** + * The percentage of operations/connections/requests on which the rate limit will be injected. 
+ */ + 'percentage'?: (_envoy_type_v3_FractionalPercent | null); + /** + * Rate limits are controlled via an HTTP header (if applicable). + */ + 'header_limit'?: (_envoy_extensions_filters_common_fault_v3_FaultRateLimit_HeaderLimit | null); + 'limit_type'?: "fixed_limit"|"header_limit"; +} + +/** + * Describes a rate limit to be applied. + */ +export interface FaultRateLimit__Output { + /** + * A fixed rate limit. + */ + 'fixed_limit'?: (_envoy_extensions_filters_common_fault_v3_FaultRateLimit_FixedLimit__Output | null); + /** + * The percentage of operations/connections/requests on which the rate limit will be injected. + */ + 'percentage': (_envoy_type_v3_FractionalPercent__Output | null); + /** + * Rate limits are controlled via an HTTP header (if applicable). + */ + 'header_limit'?: (_envoy_extensions_filters_common_fault_v3_FaultRateLimit_HeaderLimit__Output | null); + 'limit_type': "fixed_limit"|"header_limit"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/extensions/filters/http/fault/v3/FaultAbort.ts b/packages/grpc-js-xds/src/generated/envoy/extensions/filters/http/fault/v3/FaultAbort.ts new file mode 100644 index 000000000..823706cb7 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/extensions/filters/http/fault/v3/FaultAbort.ts @@ -0,0 +1,67 @@ +// Original file: deps/envoy-api/envoy/extensions/filters/http/fault/v3/fault.proto + +import type { FractionalPercent as _envoy_type_v3_FractionalPercent, FractionalPercent__Output as _envoy_type_v3_FractionalPercent__Output } from '../../../../../../envoy/type/v3/FractionalPercent'; + +/** + * Fault aborts are controlled via an HTTP header (if applicable). See the + * :ref:`HTTP fault filter ` documentation for + * more information. + */ +export interface _envoy_extensions_filters_http_fault_v3_FaultAbort_HeaderAbort { +} + +/** + * Fault aborts are controlled via an HTTP header (if applicable). See the + * :ref:`HTTP fault filter ` documentation for + * more information. + */ +export interface _envoy_extensions_filters_http_fault_v3_FaultAbort_HeaderAbort__Output { +} + +/** + * [#next-free-field: 6] + */ +export interface FaultAbort { + /** + * HTTP status code to use to abort the HTTP request. + */ + 'http_status'?: (number); + /** + * The percentage of requests/operations/connections that will be aborted with the error code + * provided. + */ + 'percentage'?: (_envoy_type_v3_FractionalPercent | null); + /** + * Fault aborts are controlled via an HTTP header (if applicable). + */ + 'header_abort'?: (_envoy_extensions_filters_http_fault_v3_FaultAbort_HeaderAbort | null); + /** + * gRPC status code to use to abort the gRPC request. + */ + 'grpc_status'?: (number); + 'error_type'?: "http_status"|"grpc_status"|"header_abort"; +} + +/** + * [#next-free-field: 6] + */ +export interface FaultAbort__Output { + /** + * HTTP status code to use to abort the HTTP request. + */ + 'http_status'?: (number); + /** + * The percentage of requests/operations/connections that will be aborted with the error code + * provided. + */ + 'percentage': (_envoy_type_v3_FractionalPercent__Output | null); + /** + * Fault aborts are controlled via an HTTP header (if applicable). + */ + 'header_abort'?: (_envoy_extensions_filters_http_fault_v3_FaultAbort_HeaderAbort__Output | null); + /** + * gRPC status code to use to abort the gRPC request. 
+ */ + 'grpc_status'?: (number); + 'error_type': "http_status"|"grpc_status"|"header_abort"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/extensions/filters/http/fault/v3/HTTPFault.ts b/packages/grpc-js-xds/src/generated/envoy/extensions/filters/http/fault/v3/HTTPFault.ts new file mode 100644 index 000000000..dd3a3b50c --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/extensions/filters/http/fault/v3/HTTPFault.ts @@ -0,0 +1,244 @@ +// Original file: deps/envoy-api/envoy/extensions/filters/http/fault/v3/fault.proto + +import type { FaultDelay as _envoy_extensions_filters_common_fault_v3_FaultDelay, FaultDelay__Output as _envoy_extensions_filters_common_fault_v3_FaultDelay__Output } from '../../../../../../envoy/extensions/filters/common/fault/v3/FaultDelay'; +import type { FaultAbort as _envoy_extensions_filters_http_fault_v3_FaultAbort, FaultAbort__Output as _envoy_extensions_filters_http_fault_v3_FaultAbort__Output } from '../../../../../../envoy/extensions/filters/http/fault/v3/FaultAbort'; +import type { HeaderMatcher as _envoy_config_route_v3_HeaderMatcher, HeaderMatcher__Output as _envoy_config_route_v3_HeaderMatcher__Output } from '../../../../../../envoy/config/route/v3/HeaderMatcher'; +import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../../../google/protobuf/UInt32Value'; +import type { FaultRateLimit as _envoy_extensions_filters_common_fault_v3_FaultRateLimit, FaultRateLimit__Output as _envoy_extensions_filters_common_fault_v3_FaultRateLimit__Output } from '../../../../../../envoy/extensions/filters/common/fault/v3/FaultRateLimit'; +import type { Struct as _google_protobuf_Struct, Struct__Output as _google_protobuf_Struct__Output } from '../../../../../../google/protobuf/Struct'; + +/** + * [#next-free-field: 17] + */ +export interface HTTPFault { + /** + * If specified, the filter will inject delays based on the values in the + * object. + */ + 'delay'?: (_envoy_extensions_filters_common_fault_v3_FaultDelay | null); + /** + * If specified, the filter will abort requests based on the values in + * the object. At least ``abort`` or ``delay`` must be specified. + */ + 'abort'?: (_envoy_extensions_filters_http_fault_v3_FaultAbort | null); + /** + * Specifies the name of the (destination) upstream cluster that the + * filter should match on. Fault injection will be restricted to requests + * bound to the specific upstream cluster. + */ + 'upstream_cluster'?: (string); + /** + * Specifies a set of headers that the filter should match on. The fault + * injection filter can be applied selectively to requests that match a set of + * headers specified in the fault filter config. The chances of actual fault + * injection further depend on the value of the :ref:`percentage + * ` field. + * The filter will check the request's headers against all the specified + * headers in the filter config. A match will happen if all the headers in the + * config are present in the request with the same values (or based on + * presence if the ``value`` field is not in the config). + */ + 'headers'?: (_envoy_config_route_v3_HeaderMatcher)[]; + /** + * Faults are injected for the specified list of downstream hosts. If this + * setting is not set, faults are injected for all downstream nodes. + * Downstream node name is taken from :ref:`the HTTP + * x-envoy-downstream-service-node + * ` header and compared + * against downstream_nodes list. 
+ */ + 'downstream_nodes'?: (string)[]; + /** + * The maximum number of faults that can be active at a single time via the configured fault + * filter. Note that because this setting can be overridden at the route level, it's possible + * for the number of active faults to be greater than this value (if injected via a different + * route). If not specified, defaults to unlimited. This setting can be overridden via + * ``runtime `` and any faults that are not injected + * due to overflow will be indicated via the ``faults_overflow + * `` stat. + * + * .. attention:: + * Like other :ref:`circuit breakers ` in Envoy, this is a fuzzy + * limit. It's possible for the number of active faults to rise slightly above the configured + * amount due to the implementation details. + */ + 'max_active_faults'?: (_google_protobuf_UInt32Value | null); + /** + * The response rate limit to be applied to the response body of the stream. When configured, + * the percentage can be overridden by the :ref:`fault.http.rate_limit.response_percent + * ` runtime key. + * + * .. attention:: + * This is a per-stream limit versus a connection level limit. This means that concurrent streams + * will each get an independent limit. + */ + 'response_rate_limit'?: (_envoy_extensions_filters_common_fault_v3_FaultRateLimit | null); + /** + * The runtime key to override the :ref:`default ` + * runtime. The default is: fault.http.delay.fixed_delay_percent + */ + 'delay_percent_runtime'?: (string); + /** + * The runtime key to override the :ref:`default ` + * runtime. The default is: fault.http.abort.abort_percent + */ + 'abort_percent_runtime'?: (string); + /** + * The runtime key to override the :ref:`default ` + * runtime. The default is: fault.http.delay.fixed_duration_ms + */ + 'delay_duration_runtime'?: (string); + /** + * The runtime key to override the :ref:`default ` + * runtime. The default is: fault.http.abort.http_status + */ + 'abort_http_status_runtime'?: (string); + /** + * The runtime key to override the :ref:`default ` + * runtime. The default is: fault.http.max_active_faults + */ + 'max_active_faults_runtime'?: (string); + /** + * The runtime key to override the :ref:`default ` + * runtime. The default is: fault.http.rate_limit.response_percent + */ + 'response_rate_limit_percent_runtime'?: (string); + /** + * The runtime key to override the :ref:`default ` + * runtime. The default is: fault.http.abort.grpc_status + */ + 'abort_grpc_status_runtime'?: (string); + /** + * To control whether stats storage is allocated dynamically for each downstream server. + * If set to true, "x-envoy-downstream-service-cluster" field of header will be ignored by this filter. + * If set to false, dynamic stats storage will be allocated for the downstream cluster name. + * Default value is false. + */ + 'disable_downstream_cluster_stats'?: (boolean); + /** + * When an abort or delay fault is executed, the metadata struct provided here will be added to the + * request's dynamic metadata under the namespace corresponding to the name of the fault filter. + * This data can be logged as part of Access Logs using the :ref:`command operator + * ` %DYNAMIC_METADATA(NAMESPACE)%, where NAMESPACE is the name of + * the fault filter. + */ + 'filter_metadata'?: (_google_protobuf_Struct | null); +} + +/** + * [#next-free-field: 17] + */ +export interface HTTPFault__Output { + /** + * If specified, the filter will inject delays based on the values in the + * object. 
+ */ + 'delay': (_envoy_extensions_filters_common_fault_v3_FaultDelay__Output | null); + /** + * If specified, the filter will abort requests based on the values in + * the object. At least ``abort`` or ``delay`` must be specified. + */ + 'abort': (_envoy_extensions_filters_http_fault_v3_FaultAbort__Output | null); + /** + * Specifies the name of the (destination) upstream cluster that the + * filter should match on. Fault injection will be restricted to requests + * bound to the specific upstream cluster. + */ + 'upstream_cluster': (string); + /** + * Specifies a set of headers that the filter should match on. The fault + * injection filter can be applied selectively to requests that match a set of + * headers specified in the fault filter config. The chances of actual fault + * injection further depend on the value of the :ref:`percentage + * ` field. + * The filter will check the request's headers against all the specified + * headers in the filter config. A match will happen if all the headers in the + * config are present in the request with the same values (or based on + * presence if the ``value`` field is not in the config). + */ + 'headers': (_envoy_config_route_v3_HeaderMatcher__Output)[]; + /** + * Faults are injected for the specified list of downstream hosts. If this + * setting is not set, faults are injected for all downstream nodes. + * Downstream node name is taken from :ref:`the HTTP + * x-envoy-downstream-service-node + * ` header and compared + * against downstream_nodes list. + */ + 'downstream_nodes': (string)[]; + /** + * The maximum number of faults that can be active at a single time via the configured fault + * filter. Note that because this setting can be overridden at the route level, it's possible + * for the number of active faults to be greater than this value (if injected via a different + * route). If not specified, defaults to unlimited. This setting can be overridden via + * ``runtime `` and any faults that are not injected + * due to overflow will be indicated via the ``faults_overflow + * `` stat. + * + * .. attention:: + * Like other :ref:`circuit breakers ` in Envoy, this is a fuzzy + * limit. It's possible for the number of active faults to rise slightly above the configured + * amount due to the implementation details. + */ + 'max_active_faults': (_google_protobuf_UInt32Value__Output | null); + /** + * The response rate limit to be applied to the response body of the stream. When configured, + * the percentage can be overridden by the :ref:`fault.http.rate_limit.response_percent + * ` runtime key. + * + * .. attention:: + * This is a per-stream limit versus a connection level limit. This means that concurrent streams + * will each get an independent limit. + */ + 'response_rate_limit': (_envoy_extensions_filters_common_fault_v3_FaultRateLimit__Output | null); + /** + * The runtime key to override the :ref:`default ` + * runtime. The default is: fault.http.delay.fixed_delay_percent + */ + 'delay_percent_runtime': (string); + /** + * The runtime key to override the :ref:`default ` + * runtime. The default is: fault.http.abort.abort_percent + */ + 'abort_percent_runtime': (string); + /** + * The runtime key to override the :ref:`default ` + * runtime. The default is: fault.http.delay.fixed_duration_ms + */ + 'delay_duration_runtime': (string); + /** + * The runtime key to override the :ref:`default ` + * runtime. 
The default is: fault.http.abort.http_status + */ + 'abort_http_status_runtime': (string); + /** + * The runtime key to override the :ref:`default ` + * runtime. The default is: fault.http.max_active_faults + */ + 'max_active_faults_runtime': (string); + /** + * The runtime key to override the :ref:`default ` + * runtime. The default is: fault.http.rate_limit.response_percent + */ + 'response_rate_limit_percent_runtime': (string); + /** + * The runtime key to override the :ref:`default ` + * runtime. The default is: fault.http.abort.grpc_status + */ + 'abort_grpc_status_runtime': (string); + /** + * To control whether stats storage is allocated dynamically for each downstream server. + * If set to true, "x-envoy-downstream-service-cluster" field of header will be ignored by this filter. + * If set to false, dynamic stats storage will be allocated for the downstream cluster name. + * Default value is false. + */ + 'disable_downstream_cluster_stats': (boolean); + /** + * When an abort or delay fault is executed, the metadata struct provided here will be added to the + * request's dynamic metadata under the namespace corresponding to the name of the fault filter. + * This data can be logged as part of Access Logs using the :ref:`command operator + * ` %DYNAMIC_METADATA(NAMESPACE)%, where NAMESPACE is the name of + * the fault filter. + */ + 'filter_metadata': (_google_protobuf_Struct__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/extensions/filters/network/http_connection_manager/v3/EnvoyMobileHttpConnectionManager.ts b/packages/grpc-js-xds/src/generated/envoy/extensions/filters/network/http_connection_manager/v3/EnvoyMobileHttpConnectionManager.ts new file mode 100644 index 000000000..eb73721c3 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/extensions/filters/network/http_connection_manager/v3/EnvoyMobileHttpConnectionManager.ts @@ -0,0 +1,29 @@ +// Original file: deps/envoy-api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto + +import type { HttpConnectionManager as _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager, HttpConnectionManager__Output as _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager__Output } from '../../../../../../envoy/extensions/filters/network/http_connection_manager/v3/HttpConnectionManager'; + +/** + * [#protodoc-title: Envoy Mobile HTTP connection manager] + * HTTP connection manager for use in Envoy mobile. + * [#extension: envoy.filters.network.envoy_mobile_http_connection_manager] + */ +export interface EnvoyMobileHttpConnectionManager { + /** + * The configuration for the underlying HttpConnectionManager which will be + * instantiated for Envoy mobile. + */ + 'config'?: (_envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager | null); +} + +/** + * [#protodoc-title: Envoy Mobile HTTP connection manager] + * HTTP connection manager for use in Envoy mobile. + * [#extension: envoy.filters.network.envoy_mobile_http_connection_manager] + */ +export interface EnvoyMobileHttpConnectionManager__Output { + /** + * The configuration for the underlying HttpConnectionManager which will be + * instantiated for Envoy mobile. 
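// A minimal sketch of an HTTPFault configuration built from the FaultDelay,
// FaultAbort, and FaultRateLimit types above: delay 10% of requests by 2s, abort
// 5% with gRPC UNAVAILABLE (14), and cap the response rate of the stream. The
// import path, the FractionalPercent/Duration field shapes, and the cluster name
// are illustrative assumptions.
import type { HTTPFault } from './generated/envoy/extensions/filters/http/fault/v3/HTTPFault';

const faultConfig: HTTPFault = {
  delay: {
    fixed_delay: { seconds: 2, nanos: 0 },                   // google.protobuf.Duration
    percentage: { numerator: 10, denominator: 'HUNDRED' },   // assumed FractionalPercent shape
    fault_delay_secifier: 'fixed_delay',                     // oneof discriminator as generated
  },
  abort: {
    grpc_status: 14,                                         // UNAVAILABLE
    percentage: { numerator: 5, denominator: 'HUNDRED' },
    error_type: 'grpc_status',
  },
  response_rate_limit: {
    fixed_limit: { limit_kbps: 64 },                         // KiB/s
    limit_type: 'fixed_limit',
  },
  upstream_cluster: 'cluster_backend',
  max_active_faults: { value: 100 },                         // google.protobuf.UInt32Value wrapper
};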
+ */ + 'config': (_envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/extensions/filters/network/http_connection_manager/v3/HttpConnectionManager.ts b/packages/grpc-js-xds/src/generated/envoy/extensions/filters/network/http_connection_manager/v3/HttpConnectionManager.ts new file mode 100644 index 000000000..1a452635c --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/extensions/filters/network/http_connection_manager/v3/HttpConnectionManager.ts @@ -0,0 +1,1895 @@ +// Original file: deps/envoy-api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto + +import type { Rds as _envoy_extensions_filters_network_http_connection_manager_v3_Rds, Rds__Output as _envoy_extensions_filters_network_http_connection_manager_v3_Rds__Output } from '../../../../../../envoy/extensions/filters/network/http_connection_manager/v3/Rds'; +import type { RouteConfiguration as _envoy_config_route_v3_RouteConfiguration, RouteConfiguration__Output as _envoy_config_route_v3_RouteConfiguration__Output } from '../../../../../../envoy/config/route/v3/RouteConfiguration'; +import type { HttpFilter as _envoy_extensions_filters_network_http_connection_manager_v3_HttpFilter, HttpFilter__Output as _envoy_extensions_filters_network_http_connection_manager_v3_HttpFilter__Output } from '../../../../../../envoy/extensions/filters/network/http_connection_manager/v3/HttpFilter'; +import type { BoolValue as _google_protobuf_BoolValue, BoolValue__Output as _google_protobuf_BoolValue__Output } from '../../../../../../google/protobuf/BoolValue'; +import type { Http1ProtocolOptions as _envoy_config_core_v3_Http1ProtocolOptions, Http1ProtocolOptions__Output as _envoy_config_core_v3_Http1ProtocolOptions__Output } from '../../../../../../envoy/config/core/v3/Http1ProtocolOptions'; +import type { Http2ProtocolOptions as _envoy_config_core_v3_Http2ProtocolOptions, Http2ProtocolOptions__Output as _envoy_config_core_v3_Http2ProtocolOptions__Output } from '../../../../../../envoy/config/core/v3/Http2ProtocolOptions'; +import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../../../../google/protobuf/Duration'; +import type { AccessLog as _envoy_config_accesslog_v3_AccessLog, AccessLog__Output as _envoy_config_accesslog_v3_AccessLog__Output } from '../../../../../../envoy/config/accesslog/v3/AccessLog'; +import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../../../google/protobuf/UInt32Value'; +import type { ScopedRoutes as _envoy_extensions_filters_network_http_connection_manager_v3_ScopedRoutes, ScopedRoutes__Output as _envoy_extensions_filters_network_http_connection_manager_v3_ScopedRoutes__Output } from '../../../../../../envoy/extensions/filters/network/http_connection_manager/v3/ScopedRoutes'; +import type { HttpProtocolOptions as _envoy_config_core_v3_HttpProtocolOptions, HttpProtocolOptions__Output as _envoy_config_core_v3_HttpProtocolOptions__Output } from '../../../../../../envoy/config/core/v3/HttpProtocolOptions'; +import type { RequestIDExtension as _envoy_extensions_filters_network_http_connection_manager_v3_RequestIDExtension, RequestIDExtension__Output as _envoy_extensions_filters_network_http_connection_manager_v3_RequestIDExtension__Output } from 
'../../../../../../envoy/extensions/filters/network/http_connection_manager/v3/RequestIDExtension'; +import type { LocalReplyConfig as _envoy_extensions_filters_network_http_connection_manager_v3_LocalReplyConfig, LocalReplyConfig__Output as _envoy_extensions_filters_network_http_connection_manager_v3_LocalReplyConfig__Output } from '../../../../../../envoy/extensions/filters/network/http_connection_manager/v3/LocalReplyConfig'; +import type { Http3ProtocolOptions as _envoy_config_core_v3_Http3ProtocolOptions, Http3ProtocolOptions__Output as _envoy_config_core_v3_Http3ProtocolOptions__Output } from '../../../../../../envoy/config/core/v3/Http3ProtocolOptions'; +import type { TypedExtensionConfig as _envoy_config_core_v3_TypedExtensionConfig, TypedExtensionConfig__Output as _envoy_config_core_v3_TypedExtensionConfig__Output } from '../../../../../../envoy/config/core/v3/TypedExtensionConfig'; +import type { SchemeHeaderTransformation as _envoy_config_core_v3_SchemeHeaderTransformation, SchemeHeaderTransformation__Output as _envoy_config_core_v3_SchemeHeaderTransformation__Output } from '../../../../../../envoy/config/core/v3/SchemeHeaderTransformation'; +import type { Percent as _envoy_type_v3_Percent, Percent__Output as _envoy_type_v3_Percent__Output } from '../../../../../../envoy/type/v3/Percent'; +import type { CustomTag as _envoy_type_tracing_v3_CustomTag, CustomTag__Output as _envoy_type_tracing_v3_CustomTag__Output } from '../../../../../../envoy/type/tracing/v3/CustomTag'; +import type { _envoy_config_trace_v3_Tracing_Http, _envoy_config_trace_v3_Tracing_Http__Output } from '../../../../../../envoy/config/trace/v3/Tracing'; +import type { CidrRange as _envoy_config_core_v3_CidrRange, CidrRange__Output as _envoy_config_core_v3_CidrRange__Output } from '../../../../../../envoy/config/core/v3/CidrRange'; +import type { PathTransformation as _envoy_type_http_v3_PathTransformation, PathTransformation__Output as _envoy_type_http_v3_PathTransformation__Output } from '../../../../../../envoy/type/http/v3/PathTransformation'; + +// Original file: deps/envoy-api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto + +export const _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_CodecType = { + /** + * For every new connection, the connection manager will determine which + * codec to use. This mode supports both ALPN for TLS listeners as well as + * protocol inference for plaintext listeners. If ALPN data is available, it + * is preferred, otherwise protocol inference is used. In almost all cases, + * this is the right option to choose for this setting. + */ + AUTO: 'AUTO', + /** + * The connection manager will assume that the client is speaking HTTP/1.1. + */ + HTTP1: 'HTTP1', + /** + * The connection manager will assume that the client is speaking HTTP/2 + * (Envoy does not require HTTP/2 to take place over TLS or to use ALPN. + * Prior knowledge is allowed). + */ + HTTP2: 'HTTP2', + /** + * [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with + * caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient + * to distinguish HTTP1 and HTTP2 traffic. + */ + HTTP3: 'HTTP3', +} as const; + +export type _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_CodecType = + /** + * For every new connection, the connection manager will determine which + * codec to use. 
This mode supports both ALPN for TLS listeners as well as + * protocol inference for plaintext listeners. If ALPN data is available, it + * is preferred, otherwise protocol inference is used. In almost all cases, + * this is the right option to choose for this setting. + */ + | 'AUTO' + | 0 + /** + * The connection manager will assume that the client is speaking HTTP/1.1. + */ + | 'HTTP1' + | 1 + /** + * The connection manager will assume that the client is speaking HTTP/2 + * (Envoy does not require HTTP/2 to take place over TLS or to use ALPN. + * Prior knowledge is allowed). + */ + | 'HTTP2' + | 2 + /** + * [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with + * caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient + * to distinguish HTTP1 and HTTP2 traffic. + */ + | 'HTTP3' + | 3 + +export type _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_CodecType__Output = typeof _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_CodecType[keyof typeof _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_CodecType] + +// Original file: deps/envoy-api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto + +/** + * How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP + * header. + */ +export const _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_ForwardClientCertDetails = { + /** + * Do not send the XFCC header to the next hop. This is the default value. + */ + SANITIZE: 'SANITIZE', + /** + * When the client connection is mTLS (Mutual TLS), forward the XFCC header + * in the request. + */ + FORWARD_ONLY: 'FORWARD_ONLY', + /** + * When the client connection is mTLS, append the client certificate + * information to the request’s XFCC header and forward it. + */ + APPEND_FORWARD: 'APPEND_FORWARD', + /** + * When the client connection is mTLS, reset the XFCC header with the client + * certificate information and send it to the next hop. + */ + SANITIZE_SET: 'SANITIZE_SET', + /** + * Always forward the XFCC header in the request, regardless of whether the + * client connection is mTLS. + */ + ALWAYS_FORWARD_ONLY: 'ALWAYS_FORWARD_ONLY', +} as const; + +/** + * How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP + * header. + */ +export type _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_ForwardClientCertDetails = + /** + * Do not send the XFCC header to the next hop. This is the default value. + */ + | 'SANITIZE' + | 0 + /** + * When the client connection is mTLS (Mutual TLS), forward the XFCC header + * in the request. + */ + | 'FORWARD_ONLY' + | 1 + /** + * When the client connection is mTLS, append the client certificate + * information to the request’s XFCC header and forward it. + */ + | 'APPEND_FORWARD' + | 2 + /** + * When the client connection is mTLS, reset the XFCC header with the client + * certificate information and send it to the next hop. + */ + | 'SANITIZE_SET' + | 3 + /** + * Always forward the XFCC header in the request, regardless of whether the + * client connection is mTLS. + */ + | 'ALWAYS_FORWARD_ONLY' + | 4 + +/** + * How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP + * header. 
+ */ +export type _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_ForwardClientCertDetails__Output = typeof _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_ForwardClientCertDetails[keyof typeof _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_ForwardClientCertDetails] + +export interface _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_HcmAccessLogOptions { + /** + * The interval to flush the above access logs. By default, the HCM will flush exactly one access log + * on stream close, when the HTTP request is complete. If this field is set, the HCM will flush access + * logs periodically at the specified interval. This is especially useful in the case of long-lived + * requests, such as CONNECT and Websockets. Final access logs can be detected via the + * `requestComplete()` method of `StreamInfo` in access log filters, or thru the `%DURATION%` substitution + * string. + * The interval must be at least 1 millisecond. + */ + 'access_log_flush_interval'?: (_google_protobuf_Duration | null); + /** + * If set to true, HCM will flush an access log when a new HTTP request is received, after request + * headers have been evaluated, before iterating through the HTTP filter chain. + * This log record, if enabled, does not depend on periodic log records or request completion log. + * Details related to upstream cluster, such as upstream host, will not be available for this log. + */ + 'flush_access_log_on_new_request'?: (boolean); + /** + * If true, the HCM will flush an access log when a tunnel is successfully established. For example, + * this could be when an upstream has successfully returned 101 Switching Protocols, or when the proxy + * has returned 200 to a CONNECT request. + */ + 'flush_log_on_tunnel_successfully_established'?: (boolean); +} + +export interface _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_HcmAccessLogOptions__Output { + /** + * The interval to flush the above access logs. By default, the HCM will flush exactly one access log + * on stream close, when the HTTP request is complete. If this field is set, the HCM will flush access + * logs periodically at the specified interval. This is especially useful in the case of long-lived + * requests, such as CONNECT and Websockets. Final access logs can be detected via the + * `requestComplete()` method of `StreamInfo` in access log filters, or thru the `%DURATION%` substitution + * string. + * The interval must be at least 1 millisecond. + */ + 'access_log_flush_interval': (_google_protobuf_Duration__Output | null); + /** + * If set to true, HCM will flush an access log when a new HTTP request is received, after request + * headers have been evaluated, before iterating through the HTTP filter chain. + * This log record, if enabled, does not depend on periodic log records or request completion log. + * Details related to upstream cluster, such as upstream host, will not be available for this log. + */ + 'flush_access_log_on_new_request': (boolean); + /** + * If true, the HCM will flush an access log when a tunnel is successfully established. For example, + * this could be when an upstream has successfully returned 101 Switching Protocols, or when the proxy + * has returned 200 to a CONNECT request. 
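// A minimal sketch of the HcmAccessLogOptions message above: flush periodic
// access logs every 30 seconds and also emit a log record when a new request is
// received. The import path and the local alias are illustrative assumptions.
import type { _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_HcmAccessLogOptions as HcmAccessLogOptions } from './generated/envoy/extensions/filters/network/http_connection_manager/v3/HttpConnectionManager';

const accessLogOptions: HcmAccessLogOptions = {
  access_log_flush_interval: { seconds: 30, nanos: 0 },  // must be at least 1 millisecond
  flush_access_log_on_new_request: true,
  flush_log_on_tunnel_successfully_established: false,
};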
+ */ + 'flush_log_on_tunnel_successfully_established': (boolean); +} + +export interface _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_InternalAddressConfig { + /** + * Whether unix socket addresses should be considered internal. + */ + 'unix_sockets'?: (boolean); + /** + * List of CIDR ranges that are treated as internal. If unset, then RFC1918 / RFC4193 + * IP addresses will be considered internal. + */ + 'cidr_ranges'?: (_envoy_config_core_v3_CidrRange)[]; +} + +export interface _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_InternalAddressConfig__Output { + /** + * Whether unix socket addresses should be considered internal. + */ + 'unix_sockets': (boolean); + /** + * List of CIDR ranges that are treated as internal. If unset, then RFC1918 / RFC4193 + * IP addresses will be considered internal. + */ + 'cidr_ranges': (_envoy_config_core_v3_CidrRange__Output)[]; +} + +// Original file: deps/envoy-api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto + +export const _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_Tracing_OperationName = { + /** + * The HTTP listener is used for ingress/incoming requests. + */ + INGRESS: 'INGRESS', + /** + * The HTTP listener is used for egress/outgoing requests. + */ + EGRESS: 'EGRESS', +} as const; + +export type _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_Tracing_OperationName = + /** + * The HTTP listener is used for ingress/incoming requests. + */ + | 'INGRESS' + | 0 + /** + * The HTTP listener is used for egress/outgoing requests. + */ + | 'EGRESS' + | 1 + +export type _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_Tracing_OperationName__Output = typeof _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_Tracing_OperationName[keyof typeof _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_Tracing_OperationName] + +/** + * [#not-implemented-hide:] Transformations that apply to path headers. Transformations are applied + * before any processing of requests by HTTP filters, routing, and matching. Only the normalized + * path will be visible internally if a transformation is enabled. Any path rewrites that the + * router performs (e.g. :ref:`regex_rewrite + * ` or :ref:`prefix_rewrite + * `) will apply to the ``:path`` header + * destined for the upstream. + * + * Note: access logging and tracing will show the original ``:path`` header. + */ +export interface _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_PathNormalizationOptions { + /** + * [#not-implemented-hide:] Normalization applies internally before any processing of requests by + * HTTP filters, routing, and matching *and* will affect the forwarded ``:path`` header. Defaults + * to :ref:`NormalizePathRFC3986 + * `. When not + * specified, this value may be overridden by the runtime variable + * :ref:`http_connection_manager.normalize_path`. + * Envoy will respond with 400 to paths that are malformed (e.g. for paths that fail RFC 3986 + * normalization due to disallowed characters.) + */ + 'forwarding_transformation'?: (_envoy_type_http_v3_PathTransformation | null); + /** + * [#not-implemented-hide:] Normalization only applies internally before any processing of + * requests by HTTP filters, routing, and matching. These will be applied after full + * transformation is applied. 
The ``:path`` header before this transformation will be restored in + * the router filter and sent upstream unless it was mutated by a filter. Defaults to no + * transformations. + * Multiple actions can be applied in the same Transformation, forming a sequential + * pipeline. The transformations will be performed in the order that they appear. Envoy will + * respond with 400 to paths that are malformed (e.g. for paths that fail RFC 3986 + * normalization due to disallowed characters.) + */ + 'http_filter_transformation'?: (_envoy_type_http_v3_PathTransformation | null); +} + +/** + * [#not-implemented-hide:] Transformations that apply to path headers. Transformations are applied + * before any processing of requests by HTTP filters, routing, and matching. Only the normalized + * path will be visible internally if a transformation is enabled. Any path rewrites that the + * router performs (e.g. :ref:`regex_rewrite + * ` or :ref:`prefix_rewrite + * `) will apply to the ``:path`` header + * destined for the upstream. + * + * Note: access logging and tracing will show the original ``:path`` header. + */ +export interface _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_PathNormalizationOptions__Output { + /** + * [#not-implemented-hide:] Normalization applies internally before any processing of requests by + * HTTP filters, routing, and matching *and* will affect the forwarded ``:path`` header. Defaults + * to :ref:`NormalizePathRFC3986 + * `. When not + * specified, this value may be overridden by the runtime variable + * :ref:`http_connection_manager.normalize_path`. + * Envoy will respond with 400 to paths that are malformed (e.g. for paths that fail RFC 3986 + * normalization due to disallowed characters.) + */ + 'forwarding_transformation': (_envoy_type_http_v3_PathTransformation__Output | null); + /** + * [#not-implemented-hide:] Normalization only applies internally before any processing of + * requests by HTTP filters, routing, and matching. These will be applied after full + * transformation is applied. The ``:path`` header before this transformation will be restored in + * the router filter and sent upstream unless it was mutated by a filter. Defaults to no + * transformations. + * Multiple actions can be applied in the same Transformation, forming a sequential + * pipeline. The transformations will be performed in the order that they appear. Envoy will + * respond with 400 to paths that are malformed (e.g. for paths that fail RFC 3986 + * normalization due to disallowed characters.) + */ + 'http_filter_transformation': (_envoy_type_http_v3_PathTransformation__Output | null); +} + +// Original file: deps/envoy-api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto + +/** + * Determines the action for request that contain %2F, %2f, %5C or %5c sequences in the URI path. + * This operation occurs before URL normalization and the merge slashes transformations if they were enabled. + */ +export const _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_PathWithEscapedSlashesAction = { + /** + * Default behavior specific to implementation (i.e. Envoy) of this configuration option. + * Envoy, by default, takes the KEEP_UNCHANGED action. + * NOTE: the implementation may change the default behavior at-will. + */ + IMPLEMENTATION_SPECIFIC_DEFAULT: 'IMPLEMENTATION_SPECIFIC_DEFAULT', + /** + * Keep escaped slashes. 
+ */ + KEEP_UNCHANGED: 'KEEP_UNCHANGED', + /** + * Reject client request with the 400 status. gRPC requests will be rejected with the INTERNAL (13) error code. + * The "httpN.downstream_rq_failed_path_normalization" counter is incremented for each rejected request. + */ + REJECT_REQUEST: 'REJECT_REQUEST', + /** + * Unescape %2F and %5C sequences and redirect request to the new path if these sequences were present. + * Redirect occurs after path normalization and merge slashes transformations if they were configured. + * NOTE: gRPC requests will be rejected with the INTERNAL (13) error code. + * This option minimizes possibility of path confusion exploits by forcing request with unescaped slashes to + * traverse all parties: downstream client, intermediate proxies, Envoy and upstream server. + * The "httpN.downstream_rq_redirected_with_normalized_path" counter is incremented for each + * redirected request. + */ + UNESCAPE_AND_REDIRECT: 'UNESCAPE_AND_REDIRECT', + /** + * Unescape %2F and %5C sequences. + * Note: this option should not be enabled if intermediaries perform path based access control as + * it may lead to path confusion vulnerabilities. + */ + UNESCAPE_AND_FORWARD: 'UNESCAPE_AND_FORWARD', +} as const; + +/** + * Determines the action for request that contain %2F, %2f, %5C or %5c sequences in the URI path. + * This operation occurs before URL normalization and the merge slashes transformations if they were enabled. + */ +export type _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_PathWithEscapedSlashesAction = + /** + * Default behavior specific to implementation (i.e. Envoy) of this configuration option. + * Envoy, by default, takes the KEEP_UNCHANGED action. + * NOTE: the implementation may change the default behavior at-will. + */ + | 'IMPLEMENTATION_SPECIFIC_DEFAULT' + | 0 + /** + * Keep escaped slashes. + */ + | 'KEEP_UNCHANGED' + | 1 + /** + * Reject client request with the 400 status. gRPC requests will be rejected with the INTERNAL (13) error code. + * The "httpN.downstream_rq_failed_path_normalization" counter is incremented for each rejected request. + */ + | 'REJECT_REQUEST' + | 2 + /** + * Unescape %2F and %5C sequences and redirect request to the new path if these sequences were present. + * Redirect occurs after path normalization and merge slashes transformations if they were configured. + * NOTE: gRPC requests will be rejected with the INTERNAL (13) error code. + * This option minimizes possibility of path confusion exploits by forcing request with unescaped slashes to + * traverse all parties: downstream client, intermediate proxies, Envoy and upstream server. + * The "httpN.downstream_rq_redirected_with_normalized_path" counter is incremented for each + * redirected request. + */ + | 'UNESCAPE_AND_REDIRECT' + | 3 + /** + * Unescape %2F and %5C sequences. + * Note: this option should not be enabled if intermediaries perform path based access control as + * it may lead to path confusion vulnerabilities. + */ + | 'UNESCAPE_AND_FORWARD' + | 4 + +/** + * Determines the action for request that contain %2F, %2f, %5C or %5c sequences in the URI path. + * This operation occurs before URL normalization and the merge slashes transformations if they were enabled. 
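Since the generated enum types in this file accept either the string name or the numeric wire value, a PathWithEscapedSlashesAction could plausibly be selected either way on the path_with_escaped_slashes_action field that appears further below; both spellings here are illustrative.

// Illustrative only: equivalent ways to pick an action for escaped slashes.
const escapedSlashesByName = { path_with_escaped_slashes_action: 'UNESCAPE_AND_REDIRECT' };
const escapedSlashesByNumber = { path_with_escaped_slashes_action: 3 };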
+ */
+export type _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_PathWithEscapedSlashesAction__Output = typeof _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_PathWithEscapedSlashesAction[keyof typeof _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_PathWithEscapedSlashesAction]
+
+/**
+ * Configures the manner in which the Proxy-Status HTTP response header is
+ * populated.
+ *
+ * See the [Proxy-Status
+ * RFC](https://datatracker.ietf.org/doc/html/draft-ietf-httpbis-proxy-status-08).
+ * [#comment:TODO: Update this with the non-draft URL when finalized.]
+ *
+ * The Proxy-Status header is a string of the form:
+ *
+ * "<server_name>; error=<error_type>; details=<details>"
" + * [#next-free-field: 7] + */ +export interface _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_ProxyStatusConfig { + /** + * If true, the details field of the Proxy-Status header is not populated with stream_info.response_code_details. + * This value defaults to ``false``, i.e. the ``details`` field is populated by default. + */ + 'remove_details'?: (boolean); + /** + * If true, the details field of the Proxy-Status header will not contain + * connection termination details. This value defaults to ``false``, i.e. the + * ``details`` field will contain connection termination details by default. + */ + 'remove_connection_termination_details'?: (boolean); + /** + * If true, the details field of the Proxy-Status header will not contain an + * enumeration of the Envoy ResponseFlags. This value defaults to ``false``, + * i.e. the ``details`` field will contain a list of ResponseFlags by default. + */ + 'remove_response_flags'?: (boolean); + /** + * If true, overwrites the existing Status header with the response code + * recommended by the Proxy-Status spec. + * This value defaults to ``false``, i.e. the HTTP response code is not + * overwritten. + */ + 'set_recommended_response_code'?: (boolean); + /** + * If ``use_node_id`` is set, Proxy-Status headers will use the Envoy's node + * ID as the name of the proxy. + */ + 'use_node_id'?: (boolean); + /** + * If ``literal_proxy_name`` is set, Proxy-Status headers will use this + * value as the name of the proxy. + */ + 'literal_proxy_name'?: (string); + /** + * The name of the proxy as it appears at the start of the Proxy-Status + * header. + * + * If neither of these values are set, this value defaults to ``server_name``, + * which itself defaults to "envoy". + */ + 'proxy_name'?: "use_node_id"|"literal_proxy_name"; +} + +/** + * Configures the manner in which the Proxy-Status HTTP response header is + * populated. + * + * See the [Proxy-Status + * RFC](https://datatracker.ietf.org/doc/html/draft-ietf-httpbis-proxy-status-08). + * [#comment:TODO: Update this with the non-draft URL when finalized.] + * + * The Proxy-Status header is a string of the form: + * + * "; error=; details=
" + * [#next-free-field: 7] + */ +export interface _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_ProxyStatusConfig__Output { + /** + * If true, the details field of the Proxy-Status header is not populated with stream_info.response_code_details. + * This value defaults to ``false``, i.e. the ``details`` field is populated by default. + */ + 'remove_details': (boolean); + /** + * If true, the details field of the Proxy-Status header will not contain + * connection termination details. This value defaults to ``false``, i.e. the + * ``details`` field will contain connection termination details by default. + */ + 'remove_connection_termination_details': (boolean); + /** + * If true, the details field of the Proxy-Status header will not contain an + * enumeration of the Envoy ResponseFlags. This value defaults to ``false``, + * i.e. the ``details`` field will contain a list of ResponseFlags by default. + */ + 'remove_response_flags': (boolean); + /** + * If true, overwrites the existing Status header with the response code + * recommended by the Proxy-Status spec. + * This value defaults to ``false``, i.e. the HTTP response code is not + * overwritten. + */ + 'set_recommended_response_code': (boolean); + /** + * If ``use_node_id`` is set, Proxy-Status headers will use the Envoy's node + * ID as the name of the proxy. + */ + 'use_node_id'?: (boolean); + /** + * If ``literal_proxy_name`` is set, Proxy-Status headers will use this + * value as the name of the proxy. + */ + 'literal_proxy_name'?: (string); + /** + * The name of the proxy as it appears at the start of the Proxy-Status + * header. + * + * If neither of these values are set, this value defaults to ``server_name``, + * which itself defaults to "envoy". + */ + 'proxy_name': "use_node_id"|"literal_proxy_name"; +} + +// Original file: deps/envoy-api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto + +export const _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_ServerHeaderTransformation = { + /** + * Overwrite any Server header with the contents of server_name. + */ + OVERWRITE: 'OVERWRITE', + /** + * If no Server header is present, append Server server_name + * If a Server header is present, pass it through. + */ + APPEND_IF_ABSENT: 'APPEND_IF_ABSENT', + /** + * Pass through the value of the server header, and do not append a header + * if none is present. + */ + PASS_THROUGH: 'PASS_THROUGH', +} as const; + +export type _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_ServerHeaderTransformation = + /** + * Overwrite any Server header with the contents of server_name. + */ + | 'OVERWRITE' + | 0 + /** + * If no Server header is present, append Server server_name + * If a Server header is present, pass it through. + */ + | 'APPEND_IF_ABSENT' + | 1 + /** + * Pass through the value of the server header, and do not append a header + * if none is present. 
+ */ + | 'PASS_THROUGH' + | 2 + +export type _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_ServerHeaderTransformation__Output = typeof _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_ServerHeaderTransformation[keyof typeof _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_ServerHeaderTransformation] + +/** + * [#next-free-field: 7] + */ +export interface _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_SetCurrentClientCertDetails { + /** + * Whether to forward the subject of the client cert. Defaults to false. + */ + 'subject'?: (_google_protobuf_BoolValue | null); + /** + * Whether to forward the entire client cert in URL encoded PEM format. This will appear in the + * XFCC header comma separated from other values with the value Cert="PEM". + * Defaults to false. + */ + 'cert'?: (boolean); + /** + * Whether to forward the entire client cert chain (including the leaf cert) in URL encoded PEM + * format. This will appear in the XFCC header comma separated from other values with the value + * Chain="PEM". + * Defaults to false. + */ + 'chain'?: (boolean); + /** + * Whether to forward the DNS type Subject Alternative Names of the client cert. + * Defaults to false. + */ + 'dns'?: (boolean); + /** + * Whether to forward the URI type Subject Alternative Name of the client cert. Defaults to + * false. + */ + 'uri'?: (boolean); +} + +/** + * [#next-free-field: 7] + */ +export interface _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_SetCurrentClientCertDetails__Output { + /** + * Whether to forward the subject of the client cert. Defaults to false. + */ + 'subject': (_google_protobuf_BoolValue__Output | null); + /** + * Whether to forward the entire client cert in URL encoded PEM format. This will appear in the + * XFCC header comma separated from other values with the value Cert="PEM". + * Defaults to false. + */ + 'cert': (boolean); + /** + * Whether to forward the entire client cert chain (including the leaf cert) in URL encoded PEM + * format. This will appear in the XFCC header comma separated from other values with the value + * Chain="PEM". + * Defaults to false. + */ + 'chain': (boolean); + /** + * Whether to forward the DNS type Subject Alternative Names of the client cert. + * Defaults to false. + */ + 'dns': (boolean); + /** + * Whether to forward the URI type Subject Alternative Name of the client cert. Defaults to + * false. + */ + 'uri': (boolean); +} + +/** + * [#next-free-field: 10] + */ +export interface _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_Tracing { + /** + * Target percentage of requests managed by this HTTP connection manager that will be force + * traced if the :ref:`x-client-trace-id ` + * header is set. This field is a direct analog for the runtime variable + * 'tracing.client_enabled' in the :ref:`HTTP Connection Manager + * `. + * Default: 100% + */ + 'client_sampling'?: (_envoy_type_v3_Percent | null); + /** + * Target percentage of requests managed by this HTTP connection manager that will be randomly + * selected for trace generation, if not requested by the client or not forced. This field is + * a direct analog for the runtime variable 'tracing.random_sampling' in the + * :ref:`HTTP Connection Manager `. 
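For reference, a sketch of SetCurrentClientCertDetails as defined above; note that subject is a google.protobuf.BoolValue wrapper while the other fields are plain booleans, and the chosen values are illustrative.

// Illustrative only: forward the certificate subject and URI SAN in the XFCC header.
const setCurrentClientCertDetails = {
  subject: { value: true },
  uri: true,
  dns: false,
  cert: false,
  chain: false,
};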
+ * Default: 100% + */ + 'random_sampling'?: (_envoy_type_v3_Percent | null); + /** + * Target percentage of requests managed by this HTTP connection manager that will be traced + * after all other sampling checks have been applied (client-directed, force tracing, random + * sampling). This field functions as an upper limit on the total configured sampling rate. For + * instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1% + * of client requests with the appropriate headers to be force traced. This field is a direct + * analog for the runtime variable 'tracing.global_enabled' in the + * :ref:`HTTP Connection Manager `. + * Default: 100% + */ + 'overall_sampling'?: (_envoy_type_v3_Percent | null); + /** + * Whether to annotate spans with additional data. If true, spans will include logs for stream + * events. + */ + 'verbose'?: (boolean); + /** + * Maximum length of the request path to extract and include in the HttpUrl tag. Used to + * truncate lengthy request paths to meet the needs of a tracing backend. + * Default: 256 + */ + 'max_path_tag_length'?: (_google_protobuf_UInt32Value | null); + /** + * A list of custom tags with unique tag name to create tags for the active span. + */ + 'custom_tags'?: (_envoy_type_tracing_v3_CustomTag)[]; + /** + * Configuration for an external tracing provider. + * If not specified, no tracing will be performed. + * + * .. attention:: + * Please be aware that ``envoy.tracers.opencensus`` provider can only be configured once + * in Envoy lifetime. + * Any attempts to reconfigure it or to use different configurations for different HCM filters + * will be rejected. + * Such a constraint is inherent to OpenCensus itself. It cannot be overcome without changes + * on OpenCensus side. + */ + 'provider'?: (_envoy_config_trace_v3_Tracing_Http | null); +} + +/** + * [#next-free-field: 10] + */ +export interface _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_Tracing__Output { + /** + * Target percentage of requests managed by this HTTP connection manager that will be force + * traced if the :ref:`x-client-trace-id ` + * header is set. This field is a direct analog for the runtime variable + * 'tracing.client_enabled' in the :ref:`HTTP Connection Manager + * `. + * Default: 100% + */ + 'client_sampling': (_envoy_type_v3_Percent__Output | null); + /** + * Target percentage of requests managed by this HTTP connection manager that will be randomly + * selected for trace generation, if not requested by the client or not forced. This field is + * a direct analog for the runtime variable 'tracing.random_sampling' in the + * :ref:`HTTP Connection Manager `. + * Default: 100% + */ + 'random_sampling': (_envoy_type_v3_Percent__Output | null); + /** + * Target percentage of requests managed by this HTTP connection manager that will be traced + * after all other sampling checks have been applied (client-directed, force tracing, random + * sampling). This field functions as an upper limit on the total configured sampling rate. For + * instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1% + * of client requests with the appropriate headers to be force traced. This field is a direct + * analog for the runtime variable 'tracing.global_enabled' in the + * :ref:`HTTP Connection Manager `. + * Default: 100% + */ + 'overall_sampling': (_envoy_type_v3_Percent__Output | null); + /** + * Whether to annotate spans with additional data. 
If true, spans will include logs for stream + * events. + */ + 'verbose': (boolean); + /** + * Maximum length of the request path to extract and include in the HttpUrl tag. Used to + * truncate lengthy request paths to meet the needs of a tracing backend. + * Default: 256 + */ + 'max_path_tag_length': (_google_protobuf_UInt32Value__Output | null); + /** + * A list of custom tags with unique tag name to create tags for the active span. + */ + 'custom_tags': (_envoy_type_tracing_v3_CustomTag__Output)[]; + /** + * Configuration for an external tracing provider. + * If not specified, no tracing will be performed. + * + * .. attention:: + * Please be aware that ``envoy.tracers.opencensus`` provider can only be configured once + * in Envoy lifetime. + * Any attempts to reconfigure it or to use different configurations for different HCM filters + * will be rejected. + * Such a constraint is inherent to OpenCensus itself. It cannot be overcome without changes + * on OpenCensus side. + */ + 'provider': (_envoy_config_trace_v3_Tracing_Http__Output | null); +} + +/** + * The configuration for HTTP upgrades. + * For each upgrade type desired, an UpgradeConfig must be added. + * + * .. warning:: + * + * The current implementation of upgrade headers does not handle + * multi-valued upgrade headers. Support for multi-valued headers may be + * added in the future if needed. + * + * .. warning:: + * The current implementation of upgrade headers does not work with HTTP/2 + * upstreams. + */ +export interface _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_UpgradeConfig { + /** + * The case-insensitive name of this upgrade, e.g. "websocket". + * For each upgrade type present in upgrade_configs, requests with + * Upgrade: [upgrade_type] + * will be proxied upstream. + */ + 'upgrade_type'?: (string); + /** + * If present, this represents the filter chain which will be created for + * this type of upgrade. If no filters are present, the filter chain for + * HTTP connections will be used for this upgrade type. + */ + 'filters'?: (_envoy_extensions_filters_network_http_connection_manager_v3_HttpFilter)[]; + /** + * Determines if upgrades are enabled or disabled by default. Defaults to true. + * This can be overridden on a per-route basis with :ref:`cluster + * ` as documented in the + * :ref:`upgrade documentation `. + */ + 'enabled'?: (_google_protobuf_BoolValue | null); +} + +/** + * The configuration for HTTP upgrades. + * For each upgrade type desired, an UpgradeConfig must be added. + * + * .. warning:: + * + * The current implementation of upgrade headers does not handle + * multi-valued upgrade headers. Support for multi-valued headers may be + * added in the future if needed. + * + * .. warning:: + * The current implementation of upgrade headers does not work with HTTP/2 + * upstreams. + */ +export interface _envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_UpgradeConfig__Output { + /** + * The case-insensitive name of this upgrade, e.g. "websocket". + * For each upgrade type present in upgrade_configs, requests with + * Upgrade: [upgrade_type] + * will be proxied upstream. + */ + 'upgrade_type': (string); + /** + * If present, this represents the filter chain which will be created for + * this type of upgrade. If no filters are present, the filter chain for + * HTTP connections will be used for this upgrade type. 
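A rough sketch of the Tracing message above; Percent is assumed to be the usual { value } wrapper from envoy.type.v3, and the literal CustomTag shape is an assumption about the sibling envoy.type.tracing.v3 typings rather than something defined in this file.

// Illustrative only: sample 10% of requests and attach one literal tag to spans.
const tracingConfig = {
  random_sampling: { value: 10 },
  overall_sampling: { value: 100 },
  verbose: false,
  max_path_tag_length: { value: 256 },
  custom_tags: [
    { tag: 'deployment', literal: { value: 'canary' } },
  ],
};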
+ */ + 'filters': (_envoy_extensions_filters_network_http_connection_manager_v3_HttpFilter__Output)[]; + /** + * Determines if upgrades are enabled or disabled by default. Defaults to true. + * This can be overridden on a per-route basis with :ref:`cluster + * ` as documented in the + * :ref:`upgrade documentation `. + */ + 'enabled': (_google_protobuf_BoolValue__Output | null); +} + +/** + * [#next-free-field: 57] + */ +export interface HttpConnectionManager { + /** + * Supplies the type of codec that the connection manager should use. + */ + 'codec_type'?: (_envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_CodecType); + /** + * The human readable prefix to use when emitting statistics for the + * connection manager. See the :ref:`statistics documentation ` for + * more information. + */ + 'stat_prefix'?: (string); + /** + * The connection manager’s route table will be dynamically loaded via the RDS API. + */ + 'rds'?: (_envoy_extensions_filters_network_http_connection_manager_v3_Rds | null); + /** + * The route table for the connection manager is static and is specified in this property. + */ + 'route_config'?: (_envoy_config_route_v3_RouteConfiguration | null); + /** + * A list of individual HTTP filters that make up the filter chain for + * requests made to the connection manager. :ref:`Order matters ` + * as the filters are processed sequentially as request events happen. + */ + 'http_filters'?: (_envoy_extensions_filters_network_http_connection_manager_v3_HttpFilter)[]; + /** + * Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` + * and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See the linked + * documentation for more information. Defaults to false. + */ + 'add_user_agent'?: (_google_protobuf_BoolValue | null); + /** + * Presence of the object defines whether the connection manager + * emits :ref:`tracing ` data to the :ref:`configured tracing provider + * `. + */ + 'tracing'?: (_envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_Tracing | null); + /** + * Additional HTTP/1 settings that are passed to the HTTP/1 codec. + * [#comment:TODO: The following fields are ignored when the + * :ref:`header validation configuration ` + * is present: + * 1. :ref:`allow_chunked_length `] + */ + 'http_protocol_options'?: (_envoy_config_core_v3_Http1ProtocolOptions | null); + /** + * Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. + */ + 'http2_protocol_options'?: (_envoy_config_core_v3_Http2ProtocolOptions | null); + /** + * An optional override that the connection manager will write to the server + * header in responses. If not set, the default is ``envoy``. + */ + 'server_name'?: (string); + /** + * The time that Envoy will wait between sending an HTTP/2 “shutdown + * notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame. + * This is used so that Envoy provides a grace period for new streams that + * race with the final GOAWAY frame. During this grace period, Envoy will + * continue to accept new streams. After the grace period, a final GOAWAY + * frame is sent and Envoy will start refusing new streams. Draining occurs + * both when a connection hits the idle timeout or during general server + * draining. The default grace period is 5000 milliseconds (5 seconds) if this + * option is not specified. 
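As a hedged example of the UpgradeConfig message defined just above, a WebSocket upgrade entry might look like this; enabled uses the BoolValue wrapper, and leaving filters unset falls back to the connection manager's regular HTTP filter chain per the comment above.

// Illustrative only: permit WebSocket upgrades with the default filter chain.
const websocketUpgrade = {
  upgrade_type: 'websocket',
  enabled: { value: true },
};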
+ */ + 'drain_timeout'?: (_google_protobuf_Duration | null); + /** + * Configuration for :ref:`HTTP access logs ` + * emitted by the connection manager. + */ + 'access_log'?: (_envoy_config_accesslog_v3_AccessLog)[]; + /** + * If set to true, the connection manager will use the real remote address + * of the client connection when determining internal versus external origin and manipulating + * various headers. If set to false or absent, the connection manager will use the + * :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. See the documentation for + * :ref:`config_http_conn_man_headers_x-forwarded-for`, + * :ref:`config_http_conn_man_headers_x-envoy-internal`, and + * :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information. + */ + 'use_remote_address'?: (_google_protobuf_BoolValue | null); + /** + * Whether the connection manager will generate the :ref:`x-request-id + * ` header if it does not exist. This defaults to + * true. Generating a random UUID4 is expensive so in high throughput scenarios where this feature + * is not desired it can be disabled. + */ + 'generate_request_id'?: (_google_protobuf_BoolValue | null); + /** + * How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP + * header. + */ + 'forward_client_cert_details'?: (_envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_ForwardClientCertDetails); + /** + * This field is valid only when :ref:`forward_client_cert_details + * ` + * is APPEND_FORWARD or SANITIZE_SET and the client connection is mTLS. It specifies the fields in + * the client certificate to be forwarded. Note that in the + * :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, ``Hash`` is always set, and + * ``By`` is always set when the client certificate presents the URI type Subject Alternative Name + * value. + */ + 'set_current_client_cert_details'?: (_envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_SetCurrentClientCertDetails | null); + /** + * If proxy_100_continue is true, Envoy will proxy incoming "Expect: + * 100-continue" headers upstream, and forward "100 Continue" responses + * downstream. If this is false or not set, Envoy will instead strip the + * "Expect: 100-continue" header, and send a "100 Continue" response itself. + */ + 'proxy_100_continue'?: (boolean); + /** + * The number of additional ingress proxy hops from the right side of the + * :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when + * determining the origin client's IP address. The default is zero if this option + * is not specified. See the documentation for + * :ref:`config_http_conn_man_headers_x-forwarded-for` for more information. + */ + 'xff_num_trusted_hops'?: (number); + /** + * If + * :ref:`use_remote_address + * ` + * is true and represent_ipv4_remote_address_as_ipv4_mapped_ipv6 is true and the remote address is + * an IPv4 address, the address will be mapped to IPv6 before it is appended to ``x-forwarded-for``. + * This is useful for testing compatibility of upstream services that parse the header value. For + * example, 50.0.0.1 is represented as ::FFFF:50.0.0.1. See `IPv4-Mapped IPv6 Addresses + * `_ for details. This will also affect the + * :ref:`config_http_conn_man_headers_x-envoy-external-address` header. See + * :ref:`http_connection_manager.represent_ipv4_remote_address_as_ipv4_mapped_ipv6 + * ` for runtime + * control. 
+ * [#not-implemented-hide:] + */ + 'represent_ipv4_remote_address_as_ipv4_mapped_ipv6'?: (boolean); + /** + * If set, Envoy will not append the remote address to the + * :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. This may be used in + * conjunction with HTTP filters that explicitly manipulate XFF after the HTTP connection manager + * has mutated the request headers. While :ref:`use_remote_address + * ` + * will also suppress XFF addition, it has consequences for logging and other + * Envoy uses of the remote address, so ``skip_xff_append`` should be used + * when only an elision of XFF addition is intended. + */ + 'skip_xff_append'?: (boolean); + /** + * Via header value to append to request and response headers. If this is + * empty, no via header will be appended. + */ + 'via'?: (string); + 'upgrade_configs'?: (_envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_UpgradeConfig)[]; + /** + * The stream idle timeout for connections managed by the connection manager. + * If not specified, this defaults to 5 minutes. The default value was selected + * so as not to interfere with any smaller configured timeouts that may have + * existed in configurations prior to the introduction of this feature, while + * introducing robustness to TCP connections that terminate without a FIN. + * + * This idle timeout applies to new streams and is overridable by the + * :ref:`route-level idle_timeout + * `. Even on a stream in + * which the override applies, prior to receipt of the initial request + * headers, the :ref:`stream_idle_timeout + * ` + * applies. Each time an encode/decode event for headers or data is processed + * for the stream, the timer will be reset. If the timeout fires, the stream + * is terminated with a 408 Request Timeout error code if no upstream response + * header has been received, otherwise a stream reset occurs. + * + * This timeout also specifies the amount of time that Envoy will wait for the peer to open enough + * window to write any remaining stream data once the entirety of stream data (local end stream is + * true) has been buffered pending available window. In other words, this timeout defends against + * a peer that does not release enough window to completely write the stream, even though all + * data has been proxied within available flow control windows. If the timeout is hit in this + * case, the :ref:`tx_flush_timeout ` counter will be + * incremented. Note that :ref:`max_stream_duration + * ` does not apply to + * this corner case. + * + * If the :ref:`overload action ` "envoy.overload_actions.reduce_timeouts" + * is configured, this timeout is scaled according to the value for + * :ref:`HTTP_DOWNSTREAM_STREAM_IDLE `. + * + * Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due + * to the granularity of events presented to the connection manager. For example, while receiving + * very large request headers, it may be the case that there is traffic regularly arriving on the + * wire while the connection manage is only able to observe the end-of-headers event, hence the + * stream may still idle timeout. + * + * A value of 0 will completely disable the connection manager stream idle + * timeout, although per-route idle timeout overrides will continue to apply. + */ + 'stream_idle_timeout'?: (_google_protobuf_Duration | null); + /** + * Configures what network addresses are considered internal for stats and header sanitation + * purposes. 
If unspecified, only RFC1918 IP addresses will be considered internal. + * See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more + * information about internal/external addresses. + */ + 'internal_address_config'?: (_envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_InternalAddressConfig | null); + /** + * The delayed close timeout is for downstream connections managed by the HTTP connection manager. + * It is defined as a grace period after connection close processing has been locally initiated + * during which Envoy will wait for the peer to close (i.e., a TCP FIN/RST is received by Envoy + * from the downstream connection) prior to Envoy closing the socket associated with that + * connection. + * NOTE: This timeout is enforced even when the socket associated with the downstream connection + * is pending a flush of the write buffer. However, any progress made writing data to the socket + * will restart the timer associated with this timeout. This means that the total grace period for + * a socket in this state will be + * +. + * + * Delaying Envoy's connection close and giving the peer the opportunity to initiate the close + * sequence mitigates a race condition that exists when downstream clients do not drain/process + * data in a connection's receive buffer after a remote close has been detected via a socket + * write(). This race leads to such clients failing to process the response code sent by Envoy, + * which could result in erroneous downstream processing. + * + * If the timeout triggers, Envoy will close the connection's socket. + * + * The default timeout is 1000 ms if this option is not specified. + * + * .. NOTE:: + * To be useful in avoiding the race condition described above, this timeout must be set + * to *at least* +<100ms to account for + * a reasonable "worst" case processing time for a full iteration of Envoy's event loop>. + * + * .. WARNING:: + * A value of 0 will completely disable delayed close processing. When disabled, the downstream + * connection's socket will be closed immediately after the write flush is completed or will + * never close if the write flush does not complete. + */ + 'delayed_close_timeout'?: (_google_protobuf_Duration | null); + /** + * The amount of time that Envoy will wait for the entire request to be received. + * The timer is activated when the request is initiated, and is disarmed when the last byte of the + * request is sent upstream (i.e. all decoding filters have processed the request), OR when the + * response is initiated. If not specified or set to 0, this timeout is disabled. + */ + 'request_timeout'?: (_google_protobuf_Duration | null); + /** + * The maximum request headers size for incoming connections. + * If unconfigured, the default max request headers allowed is 60 KiB. + * Requests that exceed this limit will receive a 431 response. + */ + 'max_request_headers_kb'?: (_google_protobuf_UInt32Value | null); + /** + * Should paths be normalized according to RFC 3986 before any processing of + * requests by HTTP filters or routing? This affects the upstream ``:path`` header + * as well. For paths that fail this check, Envoy will respond with 400 to + * paths that are malformed. This defaults to false currently but will default + * true in the future. When not specified, this value may be overridden by the + * runtime variable + * :ref:`http_connection_manager.normalize_path`. + * See `Normalization and Comparison `_ + * for details of normalization. 
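The timeout fields described above are Duration or UInt32Value wrappers; a sketch loosely following the documented defaults (5 minute stream idle timeout, 1000 ms delayed close, 60 KiB header limit) plus an illustrative 30 second request timeout could look like the following, assuming the usual seconds/nanos and value shapes.

// Illustrative only: timeout and size limits roughly mirroring the documented defaults.
const hcmTimeouts = {
  stream_idle_timeout: { seconds: 300 },
  request_timeout: { seconds: 30 },
  delayed_close_timeout: { seconds: 1 },
  max_request_headers_kb: { value: 60 },
};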
+ * Note that Envoy does not perform + * `case normalization `_ + * [#comment:TODO: This field is ignored when the + * :ref:`header validation configuration ` + * is present.] + */ + 'normalize_path'?: (_google_protobuf_BoolValue | null); + /** + * A route table will be dynamically assigned to each request based on request attributes + * (e.g., the value of a header). The "routing scopes" (i.e., route tables) and "scope keys" are + * specified in this message. + */ + 'scoped_routes'?: (_envoy_extensions_filters_network_http_connection_manager_v3_ScopedRoutes | null); + /** + * Whether the connection manager will keep the :ref:`x-request-id + * ` header if passed for a request that is edge + * (Edge request is the request from external clients to front Envoy) and not reset it, which + * is the current Envoy behaviour. This defaults to false. + */ + 'preserve_external_request_id'?: (boolean); + /** + * Determines if adjacent slashes in the path are merged into one before any processing of + * requests by HTTP filters or routing. This affects the upstream ``:path`` header as well. Without + * setting this option, incoming requests with path ``//dir///file`` will not match against route + * with ``prefix`` match set to ``/dir``. Defaults to ``false``. Note that slash merging is not part of + * `HTTP spec `_ and is provided for convenience. + * [#comment:TODO: This field is ignored when the + * :ref:`header validation configuration ` + * is present.] + */ + 'merge_slashes'?: (boolean); + /** + * Defines the action to be applied to the Server header on the response path. + * By default, Envoy will overwrite the header with the value specified in + * server_name. + */ + 'server_header_transformation'?: (_envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_ServerHeaderTransformation); + /** + * Additional settings for HTTP requests handled by the connection manager. These will be + * applicable to both HTTP1 and HTTP2 requests. + */ + 'common_http_protocol_options'?: (_envoy_config_core_v3_HttpProtocolOptions | null); + /** + * The configuration of the request ID extension. This includes operations such as + * generation, validation, and associated tracing operations. If empty, the + * :ref:`UuidRequestIdConfig ` + * default extension is used with default parameters. See the documentation for that extension + * for details on what it does. Customizing the configuration for the default extension can be + * achieved by configuring it explicitly here. For example, to disable trace reason packing, + * the following configuration can be used: + * + * .. validated-code-block:: yaml + * :type-name: envoy.extensions.filters.network.http_connection_manager.v3.RequestIDExtension + * + * typed_config: + * "@type": type.googleapis.com/envoy.extensions.request_id.uuid.v3.UuidRequestIdConfig + * pack_trace_reason: false + * + * [#extension-category: envoy.request_id] + */ + 'request_id_extension'?: (_envoy_extensions_filters_network_http_connection_manager_v3_RequestIDExtension | null); + /** + * If set, Envoy will always set :ref:`x-request-id ` header in response. + * If this is false or not set, the request ID is returned in responses only if tracing is forced using + * :ref:`x-envoy-force-trace ` header. + */ + 'always_set_request_id_in_response'?: (boolean); + /** + * The configuration to customize local reply returned by Envoy. It can customize status code, + * body text and response content type. 
If not specified, status code and text body are hard + * coded in Envoy, the response content type is plain text. + */ + 'local_reply_config'?: (_envoy_extensions_filters_network_http_connection_manager_v3_LocalReplyConfig | null); + /** + * Determines if the port part should be removed from host/authority header before any processing + * of request by HTTP filters or routing. The port would be removed only if it is equal to the :ref:`listener's` + * local port. This affects the upstream host header unless the method is + * CONNECT in which case if no filter adds a port the original port will be restored before headers are + * sent upstream. + * Without setting this option, incoming requests with host ``example:443`` will not match against + * route with :ref:`domains` match set to ``example``. Defaults to ``false``. Note that port removal is not part + * of `HTTP spec `_ and is provided for convenience. + * Only one of ``strip_matching_host_port`` or ``strip_any_host_port`` can be set. + */ + 'strip_matching_host_port'?: (boolean); + /** + * Governs Envoy's behavior when receiving invalid HTTP from downstream. + * If this option is false (default), Envoy will err on the conservative side handling HTTP + * errors, terminating both HTTP/1.1 and HTTP/2 connections when receiving an invalid request. + * If this option is set to true, Envoy will be more permissive, only resetting the invalid + * stream in the case of HTTP/2 and leaving the connection open where possible (if the entire + * request is read for HTTP/1.1) + * In general this should be true for deployments receiving trusted traffic (L2 Envoys, + * company-internal mesh) and false when receiving untrusted traffic (edge deployments). + * + * If different behaviors for invalid_http_message for HTTP/1 and HTTP/2 are + * desired, one should use the new HTTP/1 option :ref:`override_stream_error_on_invalid_http_message + * ` or the new HTTP/2 option + * :ref:`override_stream_error_on_invalid_http_message + * ` + * ``not`` the deprecated but similarly named :ref:`stream_error_on_invalid_http_messaging + * ` + */ + 'stream_error_on_invalid_http_message'?: (_google_protobuf_BoolValue | null); + /** + * The amount of time that Envoy will wait for the request headers to be received. The timer is + * activated when the first byte of the headers is received, and is disarmed when the last byte of + * the headers has been received. If not specified or set to 0, this timeout is disabled. + */ + 'request_headers_timeout'?: (_google_protobuf_Duration | null); + /** + * Determines if the port part should be removed from host/authority header before any processing + * of request by HTTP filters or routing. + * This affects the upstream host header unless the method is CONNECT in + * which case if no filter adds a port the original port will be restored before headers are sent upstream. + * Without setting this option, incoming requests with host ``example:443`` will not match against + * route with :ref:`domains` match set to ``example``. Defaults to ``false``. Note that port removal is not part + * of `HTTP spec `_ and is provided for convenience. + * Only one of ``strip_matching_host_port`` or ``strip_any_host_port`` can be set. + */ + 'strip_any_host_port'?: (boolean); + /** + * [#not-implemented-hide:] Path normalization configuration. This includes + * configurations for transformations (e.g. RFC 3986 normalization or merge + * adjacent slashes) and the policy to apply them. 
The policy determines + * whether transformations affect the forwarded ``:path`` header. RFC 3986 path + * normalization is enabled by default and the default policy is that the + * normalized header will be forwarded. See :ref:`PathNormalizationOptions + * ` + * for details. + */ + 'path_normalization_options'?: (_envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_PathNormalizationOptions | null); + /** + * Additional HTTP/3 settings that are passed directly to the HTTP/3 codec. + * [#not-implemented-hide:] + */ + 'http3_protocol_options'?: (_envoy_config_core_v3_Http3ProtocolOptions | null); + /** + * Action to take when request URL path contains escaped slash sequences (%2F, %2f, %5C and %5c). + * The default value can be overridden by the :ref:`http_connection_manager.path_with_escaped_slashes_action` + * runtime variable. + * The :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling` runtime + * variable can be used to apply the action to a portion of all requests. + * [#comment:TODO: This field is ignored when the + * :ref:`header validation configuration ` + * is present.] + */ + 'path_with_escaped_slashes_action'?: (_envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_PathWithEscapedSlashesAction); + /** + * The configuration for the original IP detection extensions. + * + * When configured the extensions will be called along with the request headers + * and information about the downstream connection, such as the directly connected address. + * Each extension will then use these parameters to decide the request's effective remote address. + * If an extension fails to detect the original IP address and isn't configured to reject + * the request, the HCM will try the remaining extensions until one succeeds or rejects + * the request. If the request isn't rejected nor any extension succeeds, the HCM will + * fallback to using the remote address. + * + * .. WARNING:: + * Extensions cannot be used in conjunction with :ref:`use_remote_address + * ` + * nor :ref:`xff_num_trusted_hops + * `. + * + * [#extension-category: envoy.http.original_ip_detection] + */ + 'original_ip_detection_extensions'?: (_envoy_config_core_v3_TypedExtensionConfig)[]; + /** + * Determines if trailing dot of the host should be removed from host/authority header before any + * processing of request by HTTP filters or routing. + * This affects the upstream host header. + * Without setting this option, incoming requests with host ``example.com.`` will not match against + * route with :ref:`domains` match set to ``example.com``. Defaults to ``false``. + * When the incoming request contains a host/authority header that includes a port number, + * setting this option will strip a trailing dot, if present, from the host section, + * leaving the port as is (e.g. host value ``example.com.:443`` will be updated to ``example.com:443``). + */ + 'strip_trailing_host_dot'?: (boolean); + /** + * Allows for explicit transformation of the :scheme header on the request path. + * If not set, Envoy's default :ref:`scheme ` + * handling applies. + */ + 'scheme_header_transformation'?: (_envoy_config_core_v3_SchemeHeaderTransformation | null); + /** + * Proxy-Status HTTP response header configuration. + * If this config is set, the Proxy-Status HTTP response header field is + * populated. By default, it is not. 
+ */ + 'proxy_status_config'?: (_envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_ProxyStatusConfig | null); + /** + * Configuration options for Header Validation (UHV). + * UHV is an extensible mechanism for checking validity of HTTP requests as well as providing + * normalization for request attributes, such as URI path. + * If the typed_header_validation_config is present it overrides the following options: + * ``normalize_path``, ``merge_slashes``, ``path_with_escaped_slashes_action`` + * ``http_protocol_options.allow_chunked_length``, ``common_http_protocol_options.headers_with_underscores_action``. + * + * The default UHV checks the following: + * + * #. HTTP/1 header map validity according to `RFC 7230 section 3.2`_ + * #. Syntax of HTTP/1 request target URI and response status + * #. HTTP/2 header map validity according to `RFC 7540 section 8.1.2`_ + * #. Syntax of HTTP/3 pseudo headers + * #. Syntax of ``Content-Length`` and ``Transfer-Encoding`` + * #. Validation of HTTP/1 requests with both ``Content-Length`` and ``Transfer-Encoding`` headers + * #. Normalization of the URI path according to `Normalization and Comparison `_ + * without `case normalization `_ + * + * [#not-implemented-hide:] + * [#extension-category: envoy.http.header_validators] + */ + 'typed_header_validation_config'?: (_envoy_config_core_v3_TypedExtensionConfig | null); + /** + * Append the `x-forwarded-port` header with the port value client used to connect to Envoy. It + * will be ignored if the `x-forwarded-port` header has been set by any trusted proxy in front of Envoy. + */ + 'append_x_forwarded_port'?: (boolean); + /** + * The configuration for the early header mutation extensions. + * + * When configured the extensions will be called before any routing, tracing, or any filter processing. + * Each extension will be applied in the order they are configured. + * If the same header is mutated by multiple extensions, then the last extension will win. + * + * [#extension-category: envoy.http.early_header_mutation] + */ + 'early_header_mutation_extensions'?: (_envoy_config_core_v3_TypedExtensionConfig)[]; + /** + * Whether the HCM will add ProxyProtocolFilterState to the Connection lifetime filter state. Defaults to `true`. + * This should be set to `false` in cases where Envoy's view of the downstream address may not correspond to the + * actual client address, for example, if there's another proxy in front of the Envoy. + */ + 'add_proxy_protocol_connection_state'?: (_google_protobuf_BoolValue | null); + /** + * .. attention:: + * This field is deprecated in favor of + * :ref:`access_log_flush_interval + * `. + * Note that if both this field and :ref:`access_log_flush_interval + * ` + * are specified, the former (deprecated field) is ignored. + * @deprecated + */ + 'access_log_flush_interval'?: (_google_protobuf_Duration | null); + /** + * .. attention:: + * This field is deprecated in favor of + * :ref:`flush_access_log_on_new_request + * `. + * Note that if both this field and :ref:`flush_access_log_on_new_request + * ` + * are specified, the former (deprecated field) is ignored. + * @deprecated + */ + 'flush_access_log_on_new_request'?: (boolean); + /** + * Additional access log options for HTTP connection manager. 
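Per the deprecation notes above, new configuration would prefer the nested access_log_options message over the top-level access_log_flush_interval and flush_access_log_on_new_request fields; a hedged sketch of that shape follows, with an assumed seconds-based Duration value.

// Illustrative only: the non-deprecated way to express access log flushing behavior.
const accessLogMigration = {
  access_log_options: {
    access_log_flush_interval: { seconds: 1 },
    flush_access_log_on_new_request: true,
  },
};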
+ */ + 'access_log_options'?: (_envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_HcmAccessLogOptions | null); + 'route_specifier'?: "rds"|"route_config"|"scoped_routes"; + 'strip_port_mode'?: "strip_any_host_port"; +} + +/** + * [#next-free-field: 57] + */ +export interface HttpConnectionManager__Output { + /** + * Supplies the type of codec that the connection manager should use. + */ + 'codec_type': (_envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_CodecType__Output); + /** + * The human readable prefix to use when emitting statistics for the + * connection manager. See the :ref:`statistics documentation ` for + * more information. + */ + 'stat_prefix': (string); + /** + * The connection manager’s route table will be dynamically loaded via the RDS API. + */ + 'rds'?: (_envoy_extensions_filters_network_http_connection_manager_v3_Rds__Output | null); + /** + * The route table for the connection manager is static and is specified in this property. + */ + 'route_config'?: (_envoy_config_route_v3_RouteConfiguration__Output | null); + /** + * A list of individual HTTP filters that make up the filter chain for + * requests made to the connection manager. :ref:`Order matters ` + * as the filters are processed sequentially as request events happen. + */ + 'http_filters': (_envoy_extensions_filters_network_http_connection_manager_v3_HttpFilter__Output)[]; + /** + * Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` + * and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See the linked + * documentation for more information. Defaults to false. + */ + 'add_user_agent': (_google_protobuf_BoolValue__Output | null); + /** + * Presence of the object defines whether the connection manager + * emits :ref:`tracing ` data to the :ref:`configured tracing provider + * `. + */ + 'tracing': (_envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_Tracing__Output | null); + /** + * Additional HTTP/1 settings that are passed to the HTTP/1 codec. + * [#comment:TODO: The following fields are ignored when the + * :ref:`header validation configuration ` + * is present: + * 1. :ref:`allow_chunked_length `] + */ + 'http_protocol_options': (_envoy_config_core_v3_Http1ProtocolOptions__Output | null); + /** + * Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. + */ + 'http2_protocol_options': (_envoy_config_core_v3_Http2ProtocolOptions__Output | null); + /** + * An optional override that the connection manager will write to the server + * header in responses. If not set, the default is ``envoy``. + */ + 'server_name': (string); + /** + * The time that Envoy will wait between sending an HTTP/2 “shutdown + * notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame. + * This is used so that Envoy provides a grace period for new streams that + * race with the final GOAWAY frame. During this grace period, Envoy will + * continue to accept new streams. After the grace period, a final GOAWAY + * frame is sent and Envoy will start refusing new streams. Draining occurs + * both when a connection hits the idle timeout or during general server + * draining. The default grace period is 5000 milliseconds (5 seconds) if this + * option is not specified. + */ + 'drain_timeout': (_google_protobuf_Duration__Output | null); + /** + * Configuration for :ref:`HTTP access logs ` + * emitted by the connection manager. 
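Pulling a few of the fields above together, a minimal HttpConnectionManager using the rds member of the route_specifier oneof might look roughly like this; the Rds, ConfigSource, and HttpFilter shapes (route_config_name, config_source.ads, the router filter name) are assumptions about sibling generated types and typical xDS usage, and a real listener would usually also attach a typed_config to the router filter.

// Illustrative only: a minimal connection manager that gets its routes via RDS.
const httpConnectionManager = {
  stat_prefix: 'ingress_http',
  codec_type: 'AUTO',
  rds: {
    route_config_name: 'local_route',
    config_source: { ads: {} },
  },
  http_filters: [
    { name: 'envoy.filters.http.router' },
  ],
};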
+ */ + 'access_log': (_envoy_config_accesslog_v3_AccessLog__Output)[]; + /** + * If set to true, the connection manager will use the real remote address + * of the client connection when determining internal versus external origin and manipulating + * various headers. If set to false or absent, the connection manager will use the + * :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. See the documentation for + * :ref:`config_http_conn_man_headers_x-forwarded-for`, + * :ref:`config_http_conn_man_headers_x-envoy-internal`, and + * :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information. + */ + 'use_remote_address': (_google_protobuf_BoolValue__Output | null); + /** + * Whether the connection manager will generate the :ref:`x-request-id + * ` header if it does not exist. This defaults to + * true. Generating a random UUID4 is expensive so in high throughput scenarios where this feature + * is not desired it can be disabled. + */ + 'generate_request_id': (_google_protobuf_BoolValue__Output | null); + /** + * How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP + * header. + */ + 'forward_client_cert_details': (_envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_ForwardClientCertDetails__Output); + /** + * This field is valid only when :ref:`forward_client_cert_details + * ` + * is APPEND_FORWARD or SANITIZE_SET and the client connection is mTLS. It specifies the fields in + * the client certificate to be forwarded. Note that in the + * :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, ``Hash`` is always set, and + * ``By`` is always set when the client certificate presents the URI type Subject Alternative Name + * value. + */ + 'set_current_client_cert_details': (_envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_SetCurrentClientCertDetails__Output | null); + /** + * If proxy_100_continue is true, Envoy will proxy incoming "Expect: + * 100-continue" headers upstream, and forward "100 Continue" responses + * downstream. If this is false or not set, Envoy will instead strip the + * "Expect: 100-continue" header, and send a "100 Continue" response itself. + */ + 'proxy_100_continue': (boolean); + /** + * The number of additional ingress proxy hops from the right side of the + * :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when + * determining the origin client's IP address. The default is zero if this option + * is not specified. See the documentation for + * :ref:`config_http_conn_man_headers_x-forwarded-for` for more information. + */ + 'xff_num_trusted_hops': (number); + /** + * If + * :ref:`use_remote_address + * ` + * is true and represent_ipv4_remote_address_as_ipv4_mapped_ipv6 is true and the remote address is + * an IPv4 address, the address will be mapped to IPv6 before it is appended to ``x-forwarded-for``. + * This is useful for testing compatibility of upstream services that parse the header value. For + * example, 50.0.0.1 is represented as ::FFFF:50.0.0.1. See `IPv4-Mapped IPv6 Addresses + * `_ for details. This will also affect the + * :ref:`config_http_conn_man_headers_x-envoy-external-address` header. See + * :ref:`http_connection_manager.represent_ipv4_remote_address_as_ipv4_mapped_ipv6 + * ` for runtime + * control. 
+ * [#not-implemented-hide:] + */ + 'represent_ipv4_remote_address_as_ipv4_mapped_ipv6': (boolean); + /** + * If set, Envoy will not append the remote address to the + * :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. This may be used in + * conjunction with HTTP filters that explicitly manipulate XFF after the HTTP connection manager + * has mutated the request headers. While :ref:`use_remote_address + * ` + * will also suppress XFF addition, it has consequences for logging and other + * Envoy uses of the remote address, so ``skip_xff_append`` should be used + * when only an elision of XFF addition is intended. + */ + 'skip_xff_append': (boolean); + /** + * Via header value to append to request and response headers. If this is + * empty, no via header will be appended. + */ + 'via': (string); + 'upgrade_configs': (_envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_UpgradeConfig__Output)[]; + /** + * The stream idle timeout for connections managed by the connection manager. + * If not specified, this defaults to 5 minutes. The default value was selected + * so as not to interfere with any smaller configured timeouts that may have + * existed in configurations prior to the introduction of this feature, while + * introducing robustness to TCP connections that terminate without a FIN. + * + * This idle timeout applies to new streams and is overridable by the + * :ref:`route-level idle_timeout + * `. Even on a stream in + * which the override applies, prior to receipt of the initial request + * headers, the :ref:`stream_idle_timeout + * ` + * applies. Each time an encode/decode event for headers or data is processed + * for the stream, the timer will be reset. If the timeout fires, the stream + * is terminated with a 408 Request Timeout error code if no upstream response + * header has been received, otherwise a stream reset occurs. + * + * This timeout also specifies the amount of time that Envoy will wait for the peer to open enough + * window to write any remaining stream data once the entirety of stream data (local end stream is + * true) has been buffered pending available window. In other words, this timeout defends against + * a peer that does not release enough window to completely write the stream, even though all + * data has been proxied within available flow control windows. If the timeout is hit in this + * case, the :ref:`tx_flush_timeout ` counter will be + * incremented. Note that :ref:`max_stream_duration + * ` does not apply to + * this corner case. + * + * If the :ref:`overload action ` "envoy.overload_actions.reduce_timeouts" + * is configured, this timeout is scaled according to the value for + * :ref:`HTTP_DOWNSTREAM_STREAM_IDLE `. + * + * Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due + * to the granularity of events presented to the connection manager. For example, while receiving + * very large request headers, it may be the case that there is traffic regularly arriving on the + * wire while the connection manage is only able to observe the end-of-headers event, hence the + * stream may still idle timeout. + * + * A value of 0 will completely disable the connection manager stream idle + * timeout, although per-route idle timeout overrides will continue to apply. + */ + 'stream_idle_timeout': (_google_protobuf_Duration__Output | null); + /** + * Configures what network addresses are considered internal for stats and header sanitation + * purposes. 
If unspecified, only RFC1918 IP addresses will be considered internal. + * See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more + * information about internal/external addresses. + */ + 'internal_address_config': (_envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_InternalAddressConfig__Output | null); + /** + * The delayed close timeout is for downstream connections managed by the HTTP connection manager. + * It is defined as a grace period after connection close processing has been locally initiated + * during which Envoy will wait for the peer to close (i.e., a TCP FIN/RST is received by Envoy + * from the downstream connection) prior to Envoy closing the socket associated with that + * connection. + * NOTE: This timeout is enforced even when the socket associated with the downstream connection + * is pending a flush of the write buffer. However, any progress made writing data to the socket + * will restart the timer associated with this timeout. This means that the total grace period for + * a socket in this state will be + * +. + * + * Delaying Envoy's connection close and giving the peer the opportunity to initiate the close + * sequence mitigates a race condition that exists when downstream clients do not drain/process + * data in a connection's receive buffer after a remote close has been detected via a socket + * write(). This race leads to such clients failing to process the response code sent by Envoy, + * which could result in erroneous downstream processing. + * + * If the timeout triggers, Envoy will close the connection's socket. + * + * The default timeout is 1000 ms if this option is not specified. + * + * .. NOTE:: + * To be useful in avoiding the race condition described above, this timeout must be set + * to *at least* +<100ms to account for + * a reasonable "worst" case processing time for a full iteration of Envoy's event loop>. + * + * .. WARNING:: + * A value of 0 will completely disable delayed close processing. When disabled, the downstream + * connection's socket will be closed immediately after the write flush is completed or will + * never close if the write flush does not complete. + */ + 'delayed_close_timeout': (_google_protobuf_Duration__Output | null); + /** + * The amount of time that Envoy will wait for the entire request to be received. + * The timer is activated when the request is initiated, and is disarmed when the last byte of the + * request is sent upstream (i.e. all decoding filters have processed the request), OR when the + * response is initiated. If not specified or set to 0, this timeout is disabled. + */ + 'request_timeout': (_google_protobuf_Duration__Output | null); + /** + * The maximum request headers size for incoming connections. + * If unconfigured, the default max request headers allowed is 60 KiB. + * Requests that exceed this limit will receive a 431 response. + */ + 'max_request_headers_kb': (_google_protobuf_UInt32Value__Output | null); + /** + * Should paths be normalized according to RFC 3986 before any processing of + * requests by HTTP filters or routing? This affects the upstream ``:path`` header + * as well. For paths that fail this check, Envoy will respond with 400 to + * paths that are malformed. This defaults to false currently but will default + * true in the future. When not specified, this value may be overridden by the + * runtime variable + * :ref:`http_connection_manager.normalize_path`. 
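For illustration, the drain, delayed-close, stream-idle, and request timeouts described above are plain Duration messages on the HttpConnectionManager input type. A minimal sketch follows; the import path (relative to packages/grpc-js-xds/src) and the proto-loader { seconds, nanos } Duration shape are assumptions, not part of this diff.

import type { HttpConnectionManager } from './generated/envoy/extensions/filters/network/http_connection_manager/v3/HttpConnectionManager';

const timeouts: HttpConnectionManager = {
  stat_prefix: 'ingress_http',
  drain_timeout: { seconds: 5 },          // grace period between the GOAWAY notification and the final GOAWAY
  delayed_close_timeout: { seconds: 1 },  // mirrors the documented 1000 ms default
  stream_idle_timeout: { seconds: 300 },  // mirrors the documented 5 minute default
  request_timeout: { seconds: 0 },        // 0 disables the whole-request timeout
};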
+ * See `Normalization and Comparison `_ + * for details of normalization. + * Note that Envoy does not perform + * `case normalization `_ + * [#comment:TODO: This field is ignored when the + * :ref:`header validation configuration ` + * is present.] + */ + 'normalize_path': (_google_protobuf_BoolValue__Output | null); + /** + * A route table will be dynamically assigned to each request based on request attributes + * (e.g., the value of a header). The "routing scopes" (i.e., route tables) and "scope keys" are + * specified in this message. + */ + 'scoped_routes'?: (_envoy_extensions_filters_network_http_connection_manager_v3_ScopedRoutes__Output | null); + /** + * Whether the connection manager will keep the :ref:`x-request-id + * ` header if passed for a request that is edge + * (Edge request is the request from external clients to front Envoy) and not reset it, which + * is the current Envoy behaviour. This defaults to false. + */ + 'preserve_external_request_id': (boolean); + /** + * Determines if adjacent slashes in the path are merged into one before any processing of + * requests by HTTP filters or routing. This affects the upstream ``:path`` header as well. Without + * setting this option, incoming requests with path ``//dir///file`` will not match against route + * with ``prefix`` match set to ``/dir``. Defaults to ``false``. Note that slash merging is not part of + * `HTTP spec `_ and is provided for convenience. + * [#comment:TODO: This field is ignored when the + * :ref:`header validation configuration ` + * is present.] + */ + 'merge_slashes': (boolean); + /** + * Defines the action to be applied to the Server header on the response path. + * By default, Envoy will overwrite the header with the value specified in + * server_name. + */ + 'server_header_transformation': (_envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_ServerHeaderTransformation__Output); + /** + * Additional settings for HTTP requests handled by the connection manager. These will be + * applicable to both HTTP1 and HTTP2 requests. + */ + 'common_http_protocol_options': (_envoy_config_core_v3_HttpProtocolOptions__Output | null); + /** + * The configuration of the request ID extension. This includes operations such as + * generation, validation, and associated tracing operations. If empty, the + * :ref:`UuidRequestIdConfig ` + * default extension is used with default parameters. See the documentation for that extension + * for details on what it does. Customizing the configuration for the default extension can be + * achieved by configuring it explicitly here. For example, to disable trace reason packing, + * the following configuration can be used: + * + * .. validated-code-block:: yaml + * :type-name: envoy.extensions.filters.network.http_connection_manager.v3.RequestIDExtension + * + * typed_config: + * "@type": type.googleapis.com/envoy.extensions.request_id.uuid.v3.UuidRequestIdConfig + * pack_trace_reason: false + * + * [#extension-category: envoy.request_id] + */ + 'request_id_extension': (_envoy_extensions_filters_network_http_connection_manager_v3_RequestIDExtension__Output | null); + /** + * If set, Envoy will always set :ref:`x-request-id ` header in response. + * If this is false or not set, the request ID is returned in responses only if tracing is forced using + * :ref:`x-envoy-force-trace ` header. + */ + 'always_set_request_id_in_response': (boolean); + /** + * The configuration to customize local reply returned by Envoy. 
It can customize status code, + * body text and response content type. If not specified, status code and text body are hard + * coded in Envoy, the response content type is plain text. + */ + 'local_reply_config': (_envoy_extensions_filters_network_http_connection_manager_v3_LocalReplyConfig__Output | null); + /** + * Determines if the port part should be removed from host/authority header before any processing + * of request by HTTP filters or routing. The port would be removed only if it is equal to the :ref:`listener's` + * local port. This affects the upstream host header unless the method is + * CONNECT in which case if no filter adds a port the original port will be restored before headers are + * sent upstream. + * Without setting this option, incoming requests with host ``example:443`` will not match against + * route with :ref:`domains` match set to ``example``. Defaults to ``false``. Note that port removal is not part + * of `HTTP spec `_ and is provided for convenience. + * Only one of ``strip_matching_host_port`` or ``strip_any_host_port`` can be set. + */ + 'strip_matching_host_port': (boolean); + /** + * Governs Envoy's behavior when receiving invalid HTTP from downstream. + * If this option is false (default), Envoy will err on the conservative side handling HTTP + * errors, terminating both HTTP/1.1 and HTTP/2 connections when receiving an invalid request. + * If this option is set to true, Envoy will be more permissive, only resetting the invalid + * stream in the case of HTTP/2 and leaving the connection open where possible (if the entire + * request is read for HTTP/1.1) + * In general this should be true for deployments receiving trusted traffic (L2 Envoys, + * company-internal mesh) and false when receiving untrusted traffic (edge deployments). + * + * If different behaviors for invalid_http_message for HTTP/1 and HTTP/2 are + * desired, one should use the new HTTP/1 option :ref:`override_stream_error_on_invalid_http_message + * ` or the new HTTP/2 option + * :ref:`override_stream_error_on_invalid_http_message + * ` + * ``not`` the deprecated but similarly named :ref:`stream_error_on_invalid_http_messaging + * ` + */ + 'stream_error_on_invalid_http_message': (_google_protobuf_BoolValue__Output | null); + /** + * The amount of time that Envoy will wait for the request headers to be received. The timer is + * activated when the first byte of the headers is received, and is disarmed when the last byte of + * the headers has been received. If not specified or set to 0, this timeout is disabled. + */ + 'request_headers_timeout': (_google_protobuf_Duration__Output | null); + /** + * Determines if the port part should be removed from host/authority header before any processing + * of request by HTTP filters or routing. + * This affects the upstream host header unless the method is CONNECT in + * which case if no filter adds a port the original port will be restored before headers are sent upstream. + * Without setting this option, incoming requests with host ``example:443`` will not match against + * route with :ref:`domains` match set to ``example``. Defaults to ``false``. Note that port removal is not part + * of `HTTP spec `_ and is provided for convenience. + * Only one of ``strip_matching_host_port`` or ``strip_any_host_port`` can be set. + */ + 'strip_any_host_port'?: (boolean); + /** + * [#not-implemented-hide:] Path normalization configuration. This includes + * configurations for transformations (e.g. 
RFC 3986 normalization or merge + * adjacent slashes) and the policy to apply them. The policy determines + * whether transformations affect the forwarded ``:path`` header. RFC 3986 path + * normalization is enabled by default and the default policy is that the + * normalized header will be forwarded. See :ref:`PathNormalizationOptions + * ` + * for details. + */ + 'path_normalization_options': (_envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_PathNormalizationOptions__Output | null); + /** + * Additional HTTP/3 settings that are passed directly to the HTTP/3 codec. + * [#not-implemented-hide:] + */ + 'http3_protocol_options': (_envoy_config_core_v3_Http3ProtocolOptions__Output | null); + /** + * Action to take when request URL path contains escaped slash sequences (%2F, %2f, %5C and %5c). + * The default value can be overridden by the :ref:`http_connection_manager.path_with_escaped_slashes_action` + * runtime variable. + * The :ref:`http_connection_manager.path_with_escaped_slashes_action_sampling` runtime + * variable can be used to apply the action to a portion of all requests. + * [#comment:TODO: This field is ignored when the + * :ref:`header validation configuration ` + * is present.] + */ + 'path_with_escaped_slashes_action': (_envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_PathWithEscapedSlashesAction__Output); + /** + * The configuration for the original IP detection extensions. + * + * When configured the extensions will be called along with the request headers + * and information about the downstream connection, such as the directly connected address. + * Each extension will then use these parameters to decide the request's effective remote address. + * If an extension fails to detect the original IP address and isn't configured to reject + * the request, the HCM will try the remaining extensions until one succeeds or rejects + * the request. If the request isn't rejected nor any extension succeeds, the HCM will + * fallback to using the remote address. + * + * .. WARNING:: + * Extensions cannot be used in conjunction with :ref:`use_remote_address + * ` + * nor :ref:`xff_num_trusted_hops + * `. + * + * [#extension-category: envoy.http.original_ip_detection] + */ + 'original_ip_detection_extensions': (_envoy_config_core_v3_TypedExtensionConfig__Output)[]; + /** + * Determines if trailing dot of the host should be removed from host/authority header before any + * processing of request by HTTP filters or routing. + * This affects the upstream host header. + * Without setting this option, incoming requests with host ``example.com.`` will not match against + * route with :ref:`domains` match set to ``example.com``. Defaults to ``false``. + * When the incoming request contains a host/authority header that includes a port number, + * setting this option will strip a trailing dot, if present, from the host section, + * leaving the port as is (e.g. host value ``example.com.:443`` will be updated to ``example.com:443``). + */ + 'strip_trailing_host_dot': (boolean); + /** + * Allows for explicit transformation of the :scheme header on the request path. + * If not set, Envoy's default :ref:`scheme ` + * handling applies. + */ + 'scheme_header_transformation': (_envoy_config_core_v3_SchemeHeaderTransformation__Output | null); + /** + * Proxy-Status HTTP response header configuration. + * If this config is set, the Proxy-Status HTTP response header field is + * populated. By default, it is not. 
+ */ + 'proxy_status_config': (_envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_ProxyStatusConfig__Output | null); + /** + * Configuration options for Header Validation (UHV). + * UHV is an extensible mechanism for checking validity of HTTP requests as well as providing + * normalization for request attributes, such as URI path. + * If the typed_header_validation_config is present it overrides the following options: + * ``normalize_path``, ``merge_slashes``, ``path_with_escaped_slashes_action`` + * ``http_protocol_options.allow_chunked_length``, ``common_http_protocol_options.headers_with_underscores_action``. + * + * The default UHV checks the following: + * + * #. HTTP/1 header map validity according to `RFC 7230 section 3.2`_ + * #. Syntax of HTTP/1 request target URI and response status + * #. HTTP/2 header map validity according to `RFC 7540 section 8.1.2`_ + * #. Syntax of HTTP/3 pseudo headers + * #. Syntax of ``Content-Length`` and ``Transfer-Encoding`` + * #. Validation of HTTP/1 requests with both ``Content-Length`` and ``Transfer-Encoding`` headers + * #. Normalization of the URI path according to `Normalization and Comparison `_ + * without `case normalization `_ + * + * [#not-implemented-hide:] + * [#extension-category: envoy.http.header_validators] + */ + 'typed_header_validation_config': (_envoy_config_core_v3_TypedExtensionConfig__Output | null); + /** + * Append the `x-forwarded-port` header with the port value client used to connect to Envoy. It + * will be ignored if the `x-forwarded-port` header has been set by any trusted proxy in front of Envoy. + */ + 'append_x_forwarded_port': (boolean); + /** + * The configuration for the early header mutation extensions. + * + * When configured the extensions will be called before any routing, tracing, or any filter processing. + * Each extension will be applied in the order they are configured. + * If the same header is mutated by multiple extensions, then the last extension will win. + * + * [#extension-category: envoy.http.early_header_mutation] + */ + 'early_header_mutation_extensions': (_envoy_config_core_v3_TypedExtensionConfig__Output)[]; + /** + * Whether the HCM will add ProxyProtocolFilterState to the Connection lifetime filter state. Defaults to `true`. + * This should be set to `false` in cases where Envoy's view of the downstream address may not correspond to the + * actual client address, for example, if there's another proxy in front of the Envoy. + */ + 'add_proxy_protocol_connection_state': (_google_protobuf_BoolValue__Output | null); + /** + * .. attention:: + * This field is deprecated in favor of + * :ref:`access_log_flush_interval + * `. + * Note that if both this field and :ref:`access_log_flush_interval + * ` + * are specified, the former (deprecated field) is ignored. + * @deprecated + */ + 'access_log_flush_interval': (_google_protobuf_Duration__Output | null); + /** + * .. attention:: + * This field is deprecated in favor of + * :ref:`flush_access_log_on_new_request + * `. + * Note that if both this field and :ref:`flush_access_log_on_new_request + * ` + * are specified, the former (deprecated field) is ignored. + * @deprecated + */ + 'flush_access_log_on_new_request': (boolean); + /** + * Additional access log options for HTTP connection manager. 
+ */ + 'access_log_options': (_envoy_extensions_filters_network_http_connection_manager_v3_HttpConnectionManager_HcmAccessLogOptions__Output | null); + 'route_specifier': "rds"|"route_config"|"scoped_routes"; + 'strip_port_mode': "strip_any_host_port"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/extensions/filters/network/http_connection_manager/v3/HttpFilter.ts b/packages/grpc-js-xds/src/generated/envoy/extensions/filters/network/http_connection_manager/v3/HttpFilter.ts new file mode 100644 index 000000000..1550ca237 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/extensions/filters/network/http_connection_manager/v3/HttpFilter.ts @@ -0,0 +1,80 @@ +// Original file: deps/envoy-api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto + +import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../../../google/protobuf/Any'; +import type { ExtensionConfigSource as _envoy_config_core_v3_ExtensionConfigSource, ExtensionConfigSource__Output as _envoy_config_core_v3_ExtensionConfigSource__Output } from '../../../../../../envoy/config/core/v3/ExtensionConfigSource'; + +/** + * [#next-free-field: 7] + */ +export interface HttpFilter { + /** + * The name of the filter configuration. It also serves as a resource name in ExtensionConfigDS. + */ + 'name'?: (string); + /** + * Filter specific configuration which depends on the filter being instantiated. See the supported + * filters for further documentation. + * + * To support configuring a :ref:`match tree `, use an + * :ref:`ExtensionWithMatcher ` + * with the desired HTTP filter. + * [#extension-category: envoy.filters.http] + */ + 'typed_config'?: (_google_protobuf_Any | null); + /** + * Configuration source specifier for an extension configuration discovery service. + * In case of a failure and without the default configuration, the HTTP listener responds with code 500. + * Extension configs delivered through this mechanism are not expected to require warming (see https://github.com/envoyproxy/envoy/issues/12061). + * + * To support configuring a :ref:`match tree `, use an + * :ref:`ExtensionWithMatcher ` + * with the desired HTTP filter. This works for both the default filter configuration as well + * as for filters provided via the API. + */ + 'config_discovery'?: (_envoy_config_core_v3_ExtensionConfigSource | null); + /** + * If true, clients that do not support this filter may ignore the + * filter but otherwise accept the config. + * Otherwise, clients that do not support this filter must reject the config. + */ + 'is_optional'?: (boolean); + 'config_type'?: "typed_config"|"config_discovery"; +} + +/** + * [#next-free-field: 7] + */ +export interface HttpFilter__Output { + /** + * The name of the filter configuration. It also serves as a resource name in ExtensionConfigDS. + */ + 'name': (string); + /** + * Filter specific configuration which depends on the filter being instantiated. See the supported + * filters for further documentation. + * + * To support configuring a :ref:`match tree `, use an + * :ref:`ExtensionWithMatcher ` + * with the desired HTTP filter. + * [#extension-category: envoy.filters.http] + */ + 'typed_config'?: (_google_protobuf_Any__Output | null); + /** + * Configuration source specifier for an extension configuration discovery service. + * In case of a failure and without the default configuration, the HTTP listener responds with code 500. 
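Pulling the HttpConnectionManager and HttpFilter types above together, a minimal configuration value might look like the sketch below. The import path (relative to packages/grpc-js-xds/src) is assumed, the { ads: {} } ConfigSource shape comes from the core/v3 types rather than this diff, the route_config_name field is taken from the upstream Rds proto, and the route and filter names are placeholders.

import type { HttpConnectionManager } from './generated/envoy/extensions/filters/network/http_connection_manager/v3/HttpConnectionManager';

const hcm: HttpConnectionManager = {
  stat_prefix: 'ingress_http',
  rds: {
    config_source: { ads: {} },        // subscribe to RDS over ADS (assumed ConfigSource shape)
    route_config_name: 'local_route',  // field from the upstream Rds proto, not visible in this diff
  },
  http_filters: [
    {
      name: 'envoy.filters.http.router',  // a real config would also pack the router settings into typed_config
      is_optional: false,
    },
  ],
};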
+ * Extension configs delivered through this mechanism are not expected to require warming (see https://github.com/envoyproxy/envoy/issues/12061). + * + * To support configuring a :ref:`match tree `, use an + * :ref:`ExtensionWithMatcher ` + * with the desired HTTP filter. This works for both the default filter configuration as well + * as for filters provided via the API. + */ + 'config_discovery'?: (_envoy_config_core_v3_ExtensionConfigSource__Output | null); + /** + * If true, clients that do not support this filter may ignore the + * filter but otherwise accept the config. + * Otherwise, clients that do not support this filter must reject the config. + */ + 'is_optional': (boolean); + 'config_type': "typed_config"|"config_discovery"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/extensions/filters/network/http_connection_manager/v3/LocalReplyConfig.ts b/packages/grpc-js-xds/src/generated/envoy/extensions/filters/network/http_connection_manager/v3/LocalReplyConfig.ts new file mode 100644 index 000000000..04de11fcb --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/extensions/filters/network/http_connection_manager/v3/LocalReplyConfig.ts @@ -0,0 +1,106 @@ +// Original file: deps/envoy-api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto + +import type { ResponseMapper as _envoy_extensions_filters_network_http_connection_manager_v3_ResponseMapper, ResponseMapper__Output as _envoy_extensions_filters_network_http_connection_manager_v3_ResponseMapper__Output } from '../../../../../../envoy/extensions/filters/network/http_connection_manager/v3/ResponseMapper'; +import type { SubstitutionFormatString as _envoy_config_core_v3_SubstitutionFormatString, SubstitutionFormatString__Output as _envoy_config_core_v3_SubstitutionFormatString__Output } from '../../../../../../envoy/config/core/v3/SubstitutionFormatString'; + +/** + * The configuration to customize local reply returned by Envoy. + */ +export interface LocalReplyConfig { + /** + * Configuration of list of mappers which allows to filter and change local response. + * The mappers will be checked by the specified order until one is matched. + */ + 'mappers'?: (_envoy_extensions_filters_network_http_connection_manager_v3_ResponseMapper)[]; + /** + * The configuration to form response body from the :ref:`command operators ` + * and to specify response content type as one of: plain/text or application/json. + * + * Example one: "plain/text" ``body_format``. + * + * .. validated-code-block:: yaml + * :type-name: envoy.config.core.v3.SubstitutionFormatString + * + * text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" + * + * The following response body in "plain/text" format will be generated for a request with + * local reply body of "upstream connection error", response_code=503 and path=/foo. + * + * .. code-block:: text + * + * upstream connect error:503:path=/foo + * + * Example two: "application/json" ``body_format``. + * + * .. validated-code-block:: yaml + * :type-name: envoy.config.core.v3.SubstitutionFormatString + * + * json_format: + * status: "%RESPONSE_CODE%" + * message: "%LOCAL_REPLY_BODY%" + * path: "%REQ(:path)%" + * + * The following response body in "application/json" format would be generated for a request with + * local reply body of "upstream connection error", response_code=503 and path=/foo. + * + * .. 
code-block:: json + * + * { + * "status": 503, + * "message": "upstream connection error", + * "path": "/foo" + * } + */ + 'body_format'?: (_envoy_config_core_v3_SubstitutionFormatString | null); +} + +/** + * The configuration to customize local reply returned by Envoy. + */ +export interface LocalReplyConfig__Output { + /** + * Configuration of list of mappers which allows to filter and change local response. + * The mappers will be checked by the specified order until one is matched. + */ + 'mappers': (_envoy_extensions_filters_network_http_connection_manager_v3_ResponseMapper__Output)[]; + /** + * The configuration to form response body from the :ref:`command operators ` + * and to specify response content type as one of: plain/text or application/json. + * + * Example one: "plain/text" ``body_format``. + * + * .. validated-code-block:: yaml + * :type-name: envoy.config.core.v3.SubstitutionFormatString + * + * text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n" + * + * The following response body in "plain/text" format will be generated for a request with + * local reply body of "upstream connection error", response_code=503 and path=/foo. + * + * .. code-block:: text + * + * upstream connect error:503:path=/foo + * + * Example two: "application/json" ``body_format``. + * + * .. validated-code-block:: yaml + * :type-name: envoy.config.core.v3.SubstitutionFormatString + * + * json_format: + * status: "%RESPONSE_CODE%" + * message: "%LOCAL_REPLY_BODY%" + * path: "%REQ(:path)%" + * + * The following response body in "application/json" format would be generated for a request with + * local reply body of "upstream connection error", response_code=503 and path=/foo. + * + * .. code-block:: json + * + * { + * "status": 503, + * "message": "upstream connection error", + * "path": "/foo" + * } + */ + 'body_format': (_envoy_config_core_v3_SubstitutionFormatString__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/filter/network/http_connection_manager/v2/Rds.ts b/packages/grpc-js-xds/src/generated/envoy/extensions/filters/network/http_connection_manager/v3/Rds.ts similarity index 62% rename from packages/grpc-js-xds/src/generated/envoy/config/filter/network/http_connection_manager/v2/Rds.ts rename to packages/grpc-js-xds/src/generated/envoy/extensions/filters/network/http_connection_manager/v3/Rds.ts index be9c038a6..b99e2f1bd 100644 --- a/packages/grpc-js-xds/src/generated/envoy/config/filter/network/http_connection_manager/v2/Rds.ts +++ b/packages/grpc-js-xds/src/generated/envoy/extensions/filters/network/http_connection_manager/v3/Rds.ts @@ -1,12 +1,12 @@ -// Original file: deps/envoy-api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +// Original file: deps/envoy-api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto -import type { ConfigSource as _envoy_api_v2_core_ConfigSource, ConfigSource__Output as _envoy_api_v2_core_ConfigSource__Output } from '../../../../../../envoy/api/v2/core/ConfigSource'; +import type { ConfigSource as _envoy_config_core_v3_ConfigSource, ConfigSource__Output as _envoy_config_core_v3_ConfigSource__Output } from '../../../../../../envoy/config/core/v3/ConfigSource'; export interface Rds { /** * Configuration source specifier for RDS. */ - 'config_source'?: (_envoy_api_v2_core_ConfigSource); + 'config_source'?: (_envoy_config_core_v3_ConfigSource | null); /** * The name of the route configuration. This name will be passed to the RDS * API. 
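The "plain/text" body_format shown in the YAML examples above can also be expressed through the generated LocalReplyConfig input type. A minimal sketch, assuming the import path and the text_format field of SubstitutionFormatString (whose definition is not part of this diff):

import type { LocalReplyConfig } from './generated/envoy/extensions/filters/network/http_connection_manager/v3/LocalReplyConfig';

const localReply: LocalReplyConfig = {
  // Produces e.g. "upstream connect error:503:path=/foo" for a 503 local reply on /foo.
  body_format: {
    text_format: '%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n',
  },
};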
This allows an Envoy configuration with multiple HTTP listeners (and @@ -20,7 +20,7 @@ export interface Rds__Output { /** * Configuration source specifier for RDS. */ - 'config_source'?: (_envoy_api_v2_core_ConfigSource__Output); + 'config_source': (_envoy_config_core_v3_ConfigSource__Output | null); /** * The name of the route configuration. This name will be passed to the RDS * API. This allows an Envoy configuration with multiple HTTP listeners (and diff --git a/packages/grpc-js-xds/src/generated/envoy/config/filter/network/http_connection_manager/v2/RequestIDExtension.ts b/packages/grpc-js-xds/src/generated/envoy/extensions/filters/network/http_connection_manager/v3/RequestIDExtension.ts similarity index 60% rename from packages/grpc-js-xds/src/generated/envoy/config/filter/network/http_connection_manager/v2/RequestIDExtension.ts rename to packages/grpc-js-xds/src/generated/envoy/extensions/filters/network/http_connection_manager/v3/RequestIDExtension.ts index 2f043d4a8..ba1789a80 100644 --- a/packages/grpc-js-xds/src/generated/envoy/config/filter/network/http_connection_manager/v2/RequestIDExtension.ts +++ b/packages/grpc-js-xds/src/generated/envoy/extensions/filters/network/http_connection_manager/v3/RequestIDExtension.ts @@ -1,4 +1,4 @@ -// Original file: deps/envoy-api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +// Original file: deps/envoy-api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../../../google/protobuf/Any'; @@ -6,12 +6,12 @@ export interface RequestIDExtension { /** * Request ID extension specific configuration. */ - 'typed_config'?: (_google_protobuf_Any); + 'typed_config'?: (_google_protobuf_Any | null); } export interface RequestIDExtension__Output { /** * Request ID extension specific configuration. 
*/ - 'typed_config'?: (_google_protobuf_Any__Output); + 'typed_config': (_google_protobuf_Any__Output | null); } diff --git a/packages/grpc-js-xds/src/generated/envoy/extensions/filters/network/http_connection_manager/v3/ResponseMapper.ts b/packages/grpc-js-xds/src/generated/envoy/extensions/filters/network/http_connection_manager/v3/ResponseMapper.ts new file mode 100644 index 000000000..66533b411 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/extensions/filters/network/http_connection_manager/v3/ResponseMapper.ts @@ -0,0 +1,67 @@ +// Original file: deps/envoy-api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto + +import type { AccessLogFilter as _envoy_config_accesslog_v3_AccessLogFilter, AccessLogFilter__Output as _envoy_config_accesslog_v3_AccessLogFilter__Output } from '../../../../../../envoy/config/accesslog/v3/AccessLogFilter'; +import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../../../google/protobuf/UInt32Value'; +import type { DataSource as _envoy_config_core_v3_DataSource, DataSource__Output as _envoy_config_core_v3_DataSource__Output } from '../../../../../../envoy/config/core/v3/DataSource'; +import type { SubstitutionFormatString as _envoy_config_core_v3_SubstitutionFormatString, SubstitutionFormatString__Output as _envoy_config_core_v3_SubstitutionFormatString__Output } from '../../../../../../envoy/config/core/v3/SubstitutionFormatString'; +import type { HeaderValueOption as _envoy_config_core_v3_HeaderValueOption, HeaderValueOption__Output as _envoy_config_core_v3_HeaderValueOption__Output } from '../../../../../../envoy/config/core/v3/HeaderValueOption'; + +/** + * The configuration to filter and change local response. + * [#next-free-field: 6] + */ +export interface ResponseMapper { + /** + * Filter to determine if this mapper should apply. + */ + 'filter'?: (_envoy_config_accesslog_v3_AccessLogFilter | null); + /** + * The new response status code if specified. + */ + 'status_code'?: (_google_protobuf_UInt32Value | null); + /** + * The new local reply body text if specified. It will be used in the ``%LOCAL_REPLY_BODY%`` + * command operator in the ``body_format``. + */ + 'body'?: (_envoy_config_core_v3_DataSource | null); + /** + * A per mapper ``body_format`` to override the :ref:`body_format `. + * It will be used when this mapper is matched. + */ + 'body_format_override'?: (_envoy_config_core_v3_SubstitutionFormatString | null); + /** + * HTTP headers to add to a local reply. This allows the response mapper to append, to add + * or to override headers of any local reply before it is sent to a downstream client. + */ + 'headers_to_add'?: (_envoy_config_core_v3_HeaderValueOption)[]; +} + +/** + * The configuration to filter and change local response. + * [#next-free-field: 6] + */ +export interface ResponseMapper__Output { + /** + * Filter to determine if this mapper should apply. + */ + 'filter': (_envoy_config_accesslog_v3_AccessLogFilter__Output | null); + /** + * The new response status code if specified. + */ + 'status_code': (_google_protobuf_UInt32Value__Output | null); + /** + * The new local reply body text if specified. It will be used in the ``%LOCAL_REPLY_BODY%`` + * command operator in the ``body_format``. + */ + 'body': (_envoy_config_core_v3_DataSource__Output | null); + /** + * A per mapper ``body_format`` to override the :ref:`body_format `. + * It will be used when this mapper is matched. 
+ */ + 'body_format_override': (_envoy_config_core_v3_SubstitutionFormatString__Output | null); + /** + * HTTP headers to add to a local reply. This allows the response mapper to append, to add + * or to override headers of any local reply before it is sent to a downstream client. + */ + 'headers_to_add': (_envoy_config_core_v3_HeaderValueOption__Output)[]; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/extensions/filters/network/http_connection_manager/v3/ScopedRds.ts b/packages/grpc-js-xds/src/generated/envoy/extensions/filters/network/http_connection_manager/v3/ScopedRds.ts new file mode 100644 index 000000000..5f7d2b6fe --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/extensions/filters/network/http_connection_manager/v3/ScopedRds.ts @@ -0,0 +1,27 @@ +// Original file: deps/envoy-api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto + +import type { ConfigSource as _envoy_config_core_v3_ConfigSource, ConfigSource__Output as _envoy_config_core_v3_ConfigSource__Output } from '../../../../../../envoy/config/core/v3/ConfigSource'; + +export interface ScopedRds { + /** + * Configuration source specifier for scoped RDS. + */ + 'scoped_rds_config_source'?: (_envoy_config_core_v3_ConfigSource | null); + /** + * xdstp:// resource locator for scoped RDS collection. + * [#not-implemented-hide:] + */ + 'srds_resources_locator'?: (string); +} + +export interface ScopedRds__Output { + /** + * Configuration source specifier for scoped RDS. + */ + 'scoped_rds_config_source': (_envoy_config_core_v3_ConfigSource__Output | null); + /** + * xdstp:// resource locator for scoped RDS collection. + * [#not-implemented-hide:] + */ + 'srds_resources_locator': (string); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/extensions/filters/network/http_connection_manager/v3/ScopedRouteConfigurationsList.ts b/packages/grpc-js-xds/src/generated/envoy/extensions/filters/network/http_connection_manager/v3/ScopedRouteConfigurationsList.ts new file mode 100644 index 000000000..e7f05e340 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/extensions/filters/network/http_connection_manager/v3/ScopedRouteConfigurationsList.ts @@ -0,0 +1,17 @@ +// Original file: deps/envoy-api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto + +import type { ScopedRouteConfiguration as _envoy_config_route_v3_ScopedRouteConfiguration, ScopedRouteConfiguration__Output as _envoy_config_route_v3_ScopedRouteConfiguration__Output } from '../../../../../../envoy/config/route/v3/ScopedRouteConfiguration'; + +/** + * This message is used to work around the limitations with 'oneof' and repeated fields. + */ +export interface ScopedRouteConfigurationsList { + 'scoped_route_configurations'?: (_envoy_config_route_v3_ScopedRouteConfiguration)[]; +} + +/** + * This message is used to work around the limitations with 'oneof' and repeated fields. 
+ */ +export interface ScopedRouteConfigurationsList__Output { + 'scoped_route_configurations': (_envoy_config_route_v3_ScopedRouteConfiguration__Output)[]; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/config/filter/network/http_connection_manager/v2/ScopedRoutes.ts b/packages/grpc-js-xds/src/generated/envoy/extensions/filters/network/http_connection_manager/v3/ScopedRoutes.ts similarity index 55% rename from packages/grpc-js-xds/src/generated/envoy/config/filter/network/http_connection_manager/v2/ScopedRoutes.ts rename to packages/grpc-js-xds/src/generated/envoy/extensions/filters/network/http_connection_manager/v3/ScopedRoutes.ts index b9b20d2bf..041af534b 100644 --- a/packages/grpc-js-xds/src/generated/envoy/config/filter/network/http_connection_manager/v2/ScopedRoutes.ts +++ b/packages/grpc-js-xds/src/generated/envoy/extensions/filters/network/http_connection_manager/v3/ScopedRoutes.ts @@ -1,28 +1,28 @@ -// Original file: deps/envoy-api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +// Original file: deps/envoy-api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto -import type { ConfigSource as _envoy_api_v2_core_ConfigSource, ConfigSource__Output as _envoy_api_v2_core_ConfigSource__Output } from '../../../../../../envoy/api/v2/core/ConfigSource'; -import type { ScopedRouteConfigurationsList as _envoy_config_filter_network_http_connection_manager_v2_ScopedRouteConfigurationsList, ScopedRouteConfigurationsList__Output as _envoy_config_filter_network_http_connection_manager_v2_ScopedRouteConfigurationsList__Output } from '../../../../../../envoy/config/filter/network/http_connection_manager/v2/ScopedRouteConfigurationsList'; -import type { ScopedRds as _envoy_config_filter_network_http_connection_manager_v2_ScopedRds, ScopedRds__Output as _envoy_config_filter_network_http_connection_manager_v2_ScopedRds__Output } from '../../../../../../envoy/config/filter/network/http_connection_manager/v2/ScopedRds'; +import type { ConfigSource as _envoy_config_core_v3_ConfigSource, ConfigSource__Output as _envoy_config_core_v3_ConfigSource__Output } from '../../../../../../envoy/config/core/v3/ConfigSource'; +import type { ScopedRouteConfigurationsList as _envoy_extensions_filters_network_http_connection_manager_v3_ScopedRouteConfigurationsList, ScopedRouteConfigurationsList__Output as _envoy_extensions_filters_network_http_connection_manager_v3_ScopedRouteConfigurationsList__Output } from '../../../../../../envoy/extensions/filters/network/http_connection_manager/v3/ScopedRouteConfigurationsList'; +import type { ScopedRds as _envoy_extensions_filters_network_http_connection_manager_v3_ScopedRds, ScopedRds__Output as _envoy_extensions_filters_network_http_connection_manager_v3_ScopedRds__Output } from '../../../../../../envoy/extensions/filters/network/http_connection_manager/v3/ScopedRds'; /** * Specifies the mechanism for constructing key fragments which are composed into scope keys. */ -export interface _envoy_config_filter_network_http_connection_manager_v2_ScopedRoutes_ScopeKeyBuilder_FragmentBuilder { +export interface _envoy_extensions_filters_network_http_connection_manager_v3_ScopedRoutes_ScopeKeyBuilder_FragmentBuilder { /** * Specifies how a header field's value should be extracted. 
*/ - 'header_value_extractor'?: (_envoy_config_filter_network_http_connection_manager_v2_ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor); + 'header_value_extractor'?: (_envoy_extensions_filters_network_http_connection_manager_v3_ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor | null); 'type'?: "header_value_extractor"; } /** * Specifies the mechanism for constructing key fragments which are composed into scope keys. */ -export interface _envoy_config_filter_network_http_connection_manager_v2_ScopedRoutes_ScopeKeyBuilder_FragmentBuilder__Output { +export interface _envoy_extensions_filters_network_http_connection_manager_v3_ScopedRoutes_ScopeKeyBuilder_FragmentBuilder__Output { /** * Specifies how a header field's value should be extracted. */ - 'header_value_extractor'?: (_envoy_config_filter_network_http_connection_manager_v2_ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor__Output); + 'header_value_extractor'?: (_envoy_extensions_filters_network_http_connection_manager_v3_ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor__Output | null); 'type': "header_value_extractor"; } @@ -45,9 +45,13 @@ export interface _envoy_config_filter_network_http_connection_manager_v2_ScopedR * * Each 'a=b' key-value pair constitutes an 'element' of the header field. */ -export interface _envoy_config_filter_network_http_connection_manager_v2_ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor { +export interface _envoy_extensions_filters_network_http_connection_manager_v3_ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor { /** * The name of the header field to extract the value from. + * + * .. note:: + * + * If the header appears multiple times only the first value is used. */ 'name'?: (string); /** @@ -66,7 +70,7 @@ export interface _envoy_config_filter_network_http_connection_manager_v2_ScopedR /** * Specifies the key value pair to extract the value from. */ - 'element'?: (_envoy_config_filter_network_http_connection_manager_v2_ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement); + 'element'?: (_envoy_extensions_filters_network_http_connection_manager_v3_ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement | null); 'extract_type'?: "index"|"element"; } @@ -89,9 +93,13 @@ export interface _envoy_config_filter_network_http_connection_manager_v2_ScopedR * * Each 'a=b' key-value pair constitutes an 'element' of the header field. */ -export interface _envoy_config_filter_network_http_connection_manager_v2_ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor__Output { +export interface _envoy_extensions_filters_network_http_connection_manager_v3_ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor__Output { /** * The name of the header field to extract the value from. + * + * .. note:: + * + * If the header appears multiple times only the first value is used. */ 'name': (string); /** @@ -110,14 +118,14 @@ export interface _envoy_config_filter_network_http_connection_manager_v2_ScopedR /** * Specifies the key value pair to extract the value from. 
*/ - 'element'?: (_envoy_config_filter_network_http_connection_manager_v2_ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement__Output); + 'element'?: (_envoy_extensions_filters_network_http_connection_manager_v3_ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement__Output | null); 'extract_type': "index"|"element"; } /** * Specifies a header field's key value pair to match on. */ -export interface _envoy_config_filter_network_http_connection_manager_v2_ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement { +export interface _envoy_extensions_filters_network_http_connection_manager_v3_ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement { /** * The separator between key and value (e.g., '=' separates 'k=v;...'). * If an element is an empty string, the element is ignored. @@ -135,7 +143,7 @@ export interface _envoy_config_filter_network_http_connection_manager_v2_ScopedR /** * Specifies a header field's key value pair to match on. */ -export interface _envoy_config_filter_network_http_connection_manager_v2_ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement__Output { +export interface _envoy_extensions_filters_network_http_connection_manager_v3_ScopedRoutes_ScopeKeyBuilder_FragmentBuilder_HeaderValueExtractor_KvElement__Output { /** * The separator between key and value (e.g., '=' separates 'k=v;...'). * If an element is an empty string, the element is ignored. @@ -152,42 +160,42 @@ export interface _envoy_config_filter_network_http_connection_manager_v2_ScopedR /** * Specifies the mechanism for constructing "scope keys" based on HTTP request attributes. These - * keys are matched against a set of :ref:`Key` - * objects assembled from :ref:`ScopedRouteConfiguration` + * keys are matched against a set of :ref:`Key` + * objects assembled from :ref:`ScopedRouteConfiguration` * messages distributed via SRDS (the Scoped Route Discovery Service) or assigned statically via - * :ref:`scoped_route_configurations_list`. + * :ref:`scoped_route_configurations_list`. * * Upon receiving a request's headers, the Router will build a key using the algorithm specified * by this message. This key will be used to look up the routing table (i.e., the - * :ref:`RouteConfiguration`) to use for the request. + * :ref:`RouteConfiguration`) to use for the request. */ -export interface _envoy_config_filter_network_http_connection_manager_v2_ScopedRoutes_ScopeKeyBuilder { +export interface _envoy_extensions_filters_network_http_connection_manager_v3_ScopedRoutes_ScopeKeyBuilder { /** * The final(built) scope key consists of the ordered union of these fragments, which are compared in order with the - * fragments of a :ref:`ScopedRouteConfiguration`. + * fragments of a :ref:`ScopedRouteConfiguration`. * A missing fragment during comparison will make the key invalid, i.e., the computed key doesn't match any key. */ - 'fragments'?: (_envoy_config_filter_network_http_connection_manager_v2_ScopedRoutes_ScopeKeyBuilder_FragmentBuilder)[]; + 'fragments'?: (_envoy_extensions_filters_network_http_connection_manager_v3_ScopedRoutes_ScopeKeyBuilder_FragmentBuilder)[]; } /** * Specifies the mechanism for constructing "scope keys" based on HTTP request attributes. 
These - * keys are matched against a set of :ref:`Key` - * objects assembled from :ref:`ScopedRouteConfiguration` + * keys are matched against a set of :ref:`Key` + * objects assembled from :ref:`ScopedRouteConfiguration` * messages distributed via SRDS (the Scoped Route Discovery Service) or assigned statically via - * :ref:`scoped_route_configurations_list`. + * :ref:`scoped_route_configurations_list`. * * Upon receiving a request's headers, the Router will build a key using the algorithm specified * by this message. This key will be used to look up the routing table (i.e., the - * :ref:`RouteConfiguration`) to use for the request. + * :ref:`RouteConfiguration`) to use for the request. */ -export interface _envoy_config_filter_network_http_connection_manager_v2_ScopedRoutes_ScopeKeyBuilder__Output { +export interface _envoy_extensions_filters_network_http_connection_manager_v3_ScopedRoutes_ScopeKeyBuilder__Output { /** * The final(built) scope key consists of the ordered union of these fragments, which are compared in order with the - * fragments of a :ref:`ScopedRouteConfiguration`. + * fragments of a :ref:`ScopedRouteConfiguration`. * A missing fragment during comparison will make the key invalid, i.e., the computed key doesn't match any key. */ - 'fragments': (_envoy_config_filter_network_http_connection_manager_v2_ScopedRoutes_ScopeKeyBuilder_FragmentBuilder__Output)[]; + 'fragments': (_envoy_extensions_filters_network_http_connection_manager_v3_ScopedRoutes_ScopeKeyBuilder_FragmentBuilder__Output)[]; } /** @@ -201,29 +209,29 @@ export interface ScopedRoutes { /** * The algorithm to use for constructing a scope key for each request. */ - 'scope_key_builder'?: (_envoy_config_filter_network_http_connection_manager_v2_ScopedRoutes_ScopeKeyBuilder); + 'scope_key_builder'?: (_envoy_extensions_filters_network_http_connection_manager_v3_ScopedRoutes_ScopeKeyBuilder | null); /** * Configuration source specifier for RDS. * This config source is used to subscribe to RouteConfiguration resources specified in * ScopedRouteConfiguration messages. */ - 'rds_config_source'?: (_envoy_api_v2_core_ConfigSource); + 'rds_config_source'?: (_envoy_config_core_v3_ConfigSource | null); /** * The set of routing scopes corresponding to the HCM. A scope is assigned to a request by * matching a key constructed from the request's attributes according to the algorithm specified * by the - * :ref:`ScopeKeyBuilder` + * :ref:`ScopeKeyBuilder` * in this message. */ - 'scoped_route_configurations_list'?: (_envoy_config_filter_network_http_connection_manager_v2_ScopedRouteConfigurationsList); + 'scoped_route_configurations_list'?: (_envoy_extensions_filters_network_http_connection_manager_v3_ScopedRouteConfigurationsList | null); /** * The set of routing scopes associated with the HCM will be dynamically loaded via the SRDS * API. A scope is assigned to a request by matching a key constructed from the request's * attributes according to the algorithm specified by the - * :ref:`ScopeKeyBuilder` + * :ref:`ScopeKeyBuilder` * in this message. */ - 'scoped_rds'?: (_envoy_config_filter_network_http_connection_manager_v2_ScopedRds); + 'scoped_rds'?: (_envoy_extensions_filters_network_http_connection_manager_v3_ScopedRds | null); 'config_specifier'?: "scoped_route_configurations_list"|"scoped_rds"; } @@ -238,28 +246,28 @@ export interface ScopedRoutes__Output { /** * The algorithm to use for constructing a scope key for each request. 
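A sketch of a ScopedRoutes value that builds its scope key from a single request header, using the generated input types above. The name field and the HeaderValueExtractor/KvElement field names (element_separator, separator, key), as well as the { ads: {} } ConfigSource shape, come from the upstream Envoy protos rather than this diff, so treat them as assumptions.

import type { ScopedRoutes } from './generated/envoy/extensions/filters/network/http_connection_manager/v3/ScopedRoutes';

const scopedRoutes: ScopedRoutes = {
  name: 'scoped-routes-demo',
  scope_key_builder: {
    fragments: [
      {
        header_value_extractor: {
          name: 'x-route-scope',                    // only the first value of a repeated header is used
          element_separator: ',',                   // "vip=a,env=prod" splits into "vip=a" and "env=prod"
          element: { separator: '=', key: 'vip' },  // this fragment of the scope key becomes "a"
        },
      },
    ],
  },
  rds_config_source: { ads: {} },
  scoped_rds: {
    scoped_rds_config_source: { ads: {} },
  },
};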
*/ - 'scope_key_builder'?: (_envoy_config_filter_network_http_connection_manager_v2_ScopedRoutes_ScopeKeyBuilder__Output); + 'scope_key_builder': (_envoy_extensions_filters_network_http_connection_manager_v3_ScopedRoutes_ScopeKeyBuilder__Output | null); /** * Configuration source specifier for RDS. * This config source is used to subscribe to RouteConfiguration resources specified in * ScopedRouteConfiguration messages. */ - 'rds_config_source'?: (_envoy_api_v2_core_ConfigSource__Output); + 'rds_config_source': (_envoy_config_core_v3_ConfigSource__Output | null); /** * The set of routing scopes corresponding to the HCM. A scope is assigned to a request by * matching a key constructed from the request's attributes according to the algorithm specified * by the - * :ref:`ScopeKeyBuilder` + * :ref:`ScopeKeyBuilder` * in this message. */ - 'scoped_route_configurations_list'?: (_envoy_config_filter_network_http_connection_manager_v2_ScopedRouteConfigurationsList__Output); + 'scoped_route_configurations_list'?: (_envoy_extensions_filters_network_http_connection_manager_v3_ScopedRouteConfigurationsList__Output | null); /** * The set of routing scopes associated with the HCM will be dynamically loaded via the SRDS * API. A scope is assigned to a request by matching a key constructed from the request's * attributes according to the algorithm specified by the - * :ref:`ScopeKeyBuilder` + * :ref:`ScopeKeyBuilder` * in this message. */ - 'scoped_rds'?: (_envoy_config_filter_network_http_connection_manager_v2_ScopedRds__Output); + 'scoped_rds'?: (_envoy_extensions_filters_network_http_connection_manager_v3_ScopedRds__Output | null); 'config_specifier': "scoped_route_configurations_list"|"scoped_rds"; } diff --git a/packages/grpc-js-xds/src/generated/envoy/extensions/load_balancing_policies/common/v3/ConsistentHashingLbConfig.ts b/packages/grpc-js-xds/src/generated/envoy/extensions/load_balancing_policies/common/v3/ConsistentHashingLbConfig.ts new file mode 100644 index 000000000..c216720f1 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/extensions/load_balancing_policies/common/v3/ConsistentHashingLbConfig.ts @@ -0,0 +1,67 @@ +// Original file: deps/envoy-api/envoy/extensions/load_balancing_policies/common/v3/common.proto + +import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../../google/protobuf/UInt32Value'; + +/** + * Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) + */ +export interface ConsistentHashingLbConfig { + /** + * If set to ``true``, the cluster will use hostname instead of the resolved + * address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address. + */ + 'use_hostname_for_hashing'?: (boolean); + /** + * Configures percentage of average cluster load to bound per upstream host. For example, with a value of 150 + * no upstream host will get a load more than 1.5 times the average load of all the hosts in the cluster. + * If not specified, the load is not bounded for any upstream host. Typical value for this parameter is between 120 and 200. + * Minimum is 100. + * + * Applies to both Ring Hash and Maglev load balancers. + * + * This is implemented based on the method described in the paper https://arxiv.org/abs/1608.01350. 
For the specified + * ``hash_balance_factor``, requests to any upstream host are capped at ``hash_balance_factor/100`` times the average number of requests + * across the cluster. When a request arrives for an upstream host that is currently serving at its max capacity, linear probing + * is used to identify an eligible host. Further, the linear probe is implemented using a random jump in hosts ring/table to identify + * the eligible host (this technique is as described in the paper https://arxiv.org/abs/1908.08762 - the random jump avoids the + * cascading overflow effect when choosing the next host in the ring/table). + * + * If weights are specified on the hosts, they are respected. + * + * This is an O(N) algorithm, unlike other load balancers. Using a lower ``hash_balance_factor`` results in more hosts + * being probed, so use a higher value if you require better performance. + */ + 'hash_balance_factor'?: (_google_protobuf_UInt32Value | null); +} + +/** + * Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) + */ +export interface ConsistentHashingLbConfig__Output { + /** + * If set to ``true``, the cluster will use hostname instead of the resolved + * address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address. + */ + 'use_hostname_for_hashing': (boolean); + /** + * Configures percentage of average cluster load to bound per upstream host. For example, with a value of 150 + * no upstream host will get a load more than 1.5 times the average load of all the hosts in the cluster. + * If not specified, the load is not bounded for any upstream host. Typical value for this parameter is between 120 and 200. + * Minimum is 100. + * + * Applies to both Ring Hash and Maglev load balancers. + * + * This is implemented based on the method described in the paper https://arxiv.org/abs/1608.01350. For the specified + * ``hash_balance_factor``, requests to any upstream host are capped at ``hash_balance_factor/100`` times the average number of requests + * across the cluster. When a request arrives for an upstream host that is currently serving at its max capacity, linear probing + * is used to identify an eligible host. Further, the linear probe is implemented using a random jump in hosts ring/table to identify + * the eligible host (this technique is as described in the paper https://arxiv.org/abs/1908.08762 - the random jump avoids the + * cascading overflow effect when choosing the next host in the ring/table). + * + * If weights are specified on the hosts, they are respected. + * + * This is an O(N) algorithm, unlike other load balancers. Using a lower ``hash_balance_factor`` results in more hosts + * being probed, so use a higher value if you require better performance. 
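As a concrete reading of the bound described above: with a hash_balance_factor of 150 and an average of 40 in-flight requests per host, any single host is capped at 1.5 * 40 = 60 requests before linear probing moves on to another host. A throwaway helper makes the arithmetic explicit (illustrative only, not part of the generated code):

// Per-host load cap implied by hash_balance_factor (expressed in percent).
function perHostRequestCap(hashBalanceFactor: number, averageClusterLoad: number): number {
  return (hashBalanceFactor / 100) * averageClusterLoad;
}

console.log(perHostRequestCap(150, 40)); // 60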
+ */ + 'hash_balance_factor': (_google_protobuf_UInt32Value__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/extensions/load_balancing_policies/common/v3/LocalityLbConfig.ts b/packages/grpc-js-xds/src/generated/envoy/extensions/load_balancing_policies/common/v3/LocalityLbConfig.ts new file mode 100644 index 000000000..4e3d9659e --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/extensions/load_balancing_policies/common/v3/LocalityLbConfig.ts @@ -0,0 +1,100 @@ +// Original file: deps/envoy-api/envoy/extensions/load_balancing_policies/common/v3/common.proto + +import type { Percent as _envoy_type_v3_Percent, Percent__Output as _envoy_type_v3_Percent__Output } from '../../../../../envoy/type/v3/Percent'; +import type { UInt64Value as _google_protobuf_UInt64Value, UInt64Value__Output as _google_protobuf_UInt64Value__Output } from '../../../../../google/protobuf/UInt64Value'; + +/** + * Configuration for :ref:`locality weighted load balancing + * ` + */ +export interface _envoy_extensions_load_balancing_policies_common_v3_LocalityLbConfig_LocalityWeightedLbConfig { +} + +/** + * Configuration for :ref:`locality weighted load balancing + * ` + */ +export interface _envoy_extensions_load_balancing_policies_common_v3_LocalityLbConfig_LocalityWeightedLbConfig__Output { +} + +/** + * Configuration for :ref:`zone aware routing + * `. + */ +export interface _envoy_extensions_load_balancing_policies_common_v3_LocalityLbConfig_ZoneAwareLbConfig { + /** + * Configures percentage of requests that will be considered for zone aware routing + * if zone aware routing is configured. If not specified, the default is 100%. + * * :ref:`runtime values `. + * * :ref:`Zone aware routing support `. + */ + 'routing_enabled'?: (_envoy_type_v3_Percent | null); + /** + * Configures minimum upstream cluster size required for zone aware routing + * If upstream cluster size is less than specified, zone aware routing is not performed + * even if zone aware routing is configured. If not specified, the default is 6. + * * :ref:`runtime values `. + * * :ref:`Zone aware routing support `. + */ + 'min_cluster_size'?: (_google_protobuf_UInt64Value | null); + /** + * If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic + * mode`. Instead, the cluster will fail all + * requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a + * failing service. + */ + 'fail_traffic_on_panic'?: (boolean); +} + +/** + * Configuration for :ref:`zone aware routing + * `. + */ +export interface _envoy_extensions_load_balancing_policies_common_v3_LocalityLbConfig_ZoneAwareLbConfig__Output { + /** + * Configures percentage of requests that will be considered for zone aware routing + * if zone aware routing is configured. If not specified, the default is 100%. + * * :ref:`runtime values `. + * * :ref:`Zone aware routing support `. + */ + 'routing_enabled': (_envoy_type_v3_Percent__Output | null); + /** + * Configures minimum upstream cluster size required for zone aware routing + * If upstream cluster size is less than specified, zone aware routing is not performed + * even if zone aware routing is configured. If not specified, the default is 6. + * * :ref:`runtime values `. + * * :ref:`Zone aware routing support `. + */ + 'min_cluster_size': (_google_protobuf_UInt64Value__Output | null); + /** + * If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic + * mode`. 
Instead, the cluster will fail all + * requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a + * failing service. + */ + 'fail_traffic_on_panic': (boolean); +} + +export interface LocalityLbConfig { + /** + * Configuration for local zone aware load balancing. + */ + 'zone_aware_lb_config'?: (_envoy_extensions_load_balancing_policies_common_v3_LocalityLbConfig_ZoneAwareLbConfig | null); + /** + * Enable locality weighted load balancing. + */ + 'locality_weighted_lb_config'?: (_envoy_extensions_load_balancing_policies_common_v3_LocalityLbConfig_LocalityWeightedLbConfig | null); + 'locality_config_specifier'?: "zone_aware_lb_config"|"locality_weighted_lb_config"; +} + +export interface LocalityLbConfig__Output { + /** + * Configuration for local zone aware load balancing. + */ + 'zone_aware_lb_config'?: (_envoy_extensions_load_balancing_policies_common_v3_LocalityLbConfig_ZoneAwareLbConfig__Output | null); + /** + * Enable locality weighted load balancing. + */ + 'locality_weighted_lb_config'?: (_envoy_extensions_load_balancing_policies_common_v3_LocalityLbConfig_LocalityWeightedLbConfig__Output | null); + 'locality_config_specifier': "zone_aware_lb_config"|"locality_weighted_lb_config"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/extensions/load_balancing_policies/common/v3/SlowStartConfig.ts b/packages/grpc-js-xds/src/generated/envoy/extensions/load_balancing_policies/common/v3/SlowStartConfig.ts new file mode 100644 index 000000000..bc222ff89 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/extensions/load_balancing_policies/common/v3/SlowStartConfig.ts @@ -0,0 +1,71 @@ +// Original file: deps/envoy-api/envoy/extensions/load_balancing_policies/common/v3/common.proto + +import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../../../google/protobuf/Duration'; +import type { RuntimeDouble as _envoy_config_core_v3_RuntimeDouble, RuntimeDouble__Output as _envoy_config_core_v3_RuntimeDouble__Output } from '../../../../../envoy/config/core/v3/RuntimeDouble'; +import type { Percent as _envoy_type_v3_Percent, Percent__Output as _envoy_type_v3_Percent__Output } from '../../../../../envoy/type/v3/Percent'; + +/** + * Configuration for :ref:`slow start mode `. + */ +export interface SlowStartConfig { + /** + * Represents the size of slow start window. + * If set, the newly created host remains in slow start mode starting from its creation time + * for the duration of slow start window. + */ + 'slow_start_window'?: (_google_protobuf_Duration | null); + /** + * This parameter controls the speed of traffic increase over the slow start window. Defaults to 1.0, + * so that endpoint would get linearly increasing amount of traffic. + * When increasing the value for this parameter, the speed of traffic ramp-up increases non-linearly. + * The value of aggression parameter should be greater than 0.0. + * By tuning the parameter, is possible to achieve polynomial or exponential shape of ramp-up curve. + * + * During slow start window, effective weight of an endpoint would be scaled with time factor and aggression: + * ``new_weight = weight * max(min_weight_percent, time_factor ^ (1 / aggression))``, + * where ``time_factor=(time_since_start_seconds / slow_start_time_seconds)``. + * + * As time progresses, more and more traffic would be sent to endpoint, which is in slow start window. + * Once host exits slow start, time_factor and aggression no longer affect its weight. 
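To make the weight formula above concrete, here is a minimal sketch that evaluates ``new_weight`` for a host still inside its slow start window; ``min_weight_percent`` is treated as a fraction (10% becomes 0.1), and every name and default here is illustrative rather than taken from Envoy or gRPC.

function slowStartWeight(
  baseWeight: number,
  timeSinceStartSeconds: number,
  slowStartWindowSeconds: number,
  aggression = 1.0,
  minWeightFraction = 0.1,
): number {
  if (timeSinceStartSeconds >= slowStartWindowSeconds) {
    return baseWeight; // the host has exited slow start, so its weight is unscaled
  }
  const timeFactor = timeSinceStartSeconds / slowStartWindowSeconds;
  return baseWeight * Math.max(minWeightFraction, Math.pow(timeFactor, 1 / aggression));
}

// Halfway through the window with aggression 1.0, a weight of 100 scales to 50.
console.log(slowStartWeight(100, 15, 30));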
+ */ + 'aggression'?: (_envoy_config_core_v3_RuntimeDouble | null); + /** + * Configures the minimum percentage of origin weight that avoids too small new weight, + * which may cause endpoints in slow start mode receive no traffic in slow start window. + * If not specified, the default is 10%. + */ + 'min_weight_percent'?: (_envoy_type_v3_Percent | null); +} + +/** + * Configuration for :ref:`slow start mode `. + */ +export interface SlowStartConfig__Output { + /** + * Represents the size of slow start window. + * If set, the newly created host remains in slow start mode starting from its creation time + * for the duration of slow start window. + */ + 'slow_start_window': (_google_protobuf_Duration__Output | null); + /** + * This parameter controls the speed of traffic increase over the slow start window. Defaults to 1.0, + * so that endpoint would get linearly increasing amount of traffic. + * When increasing the value for this parameter, the speed of traffic ramp-up increases non-linearly. + * The value of aggression parameter should be greater than 0.0. + * By tuning the parameter, is possible to achieve polynomial or exponential shape of ramp-up curve. + * + * During slow start window, effective weight of an endpoint would be scaled with time factor and aggression: + * ``new_weight = weight * max(min_weight_percent, time_factor ^ (1 / aggression))``, + * where ``time_factor=(time_since_start_seconds / slow_start_time_seconds)``. + * + * As time progresses, more and more traffic would be sent to endpoint, which is in slow start window. + * Once host exits slow start, time_factor and aggression no longer affect its weight. + */ + 'aggression': (_envoy_config_core_v3_RuntimeDouble__Output | null); + /** + * Configures the minimum percentage of origin weight that avoids too small new weight, + * which may cause endpoints in slow start mode receive no traffic in slow start window. + * If not specified, the default is 10%. + */ + 'min_weight_percent': (_envoy_type_v3_Percent__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/extensions/load_balancing_policies/pick_first/v3/PickFirst.ts b/packages/grpc-js-xds/src/generated/envoy/extensions/load_balancing_policies/pick_first/v3/PickFirst.ts new file mode 100644 index 000000000..1208575d0 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/extensions/load_balancing_policies/pick_first/v3/PickFirst.ts @@ -0,0 +1,26 @@ +// Original file: deps/envoy-api/envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.proto + + +/** + * This configuration allows the built-in PICK_FIRST LB policy to be configured + * via the LB policy extension point. + */ +export interface PickFirst { + /** + * If set to true, instructs the LB policy to shuffle the list of addresses + * received from the name resolver before attempting to connect to them. + */ + 'shuffle_address_list'?: (boolean); +} + +/** + * This configuration allows the built-in PICK_FIRST LB policy to be configured + * via the LB policy extension point. + */ +export interface PickFirst__Output { + /** + * If set to true, instructs the LB policy to shuffle the list of addresses + * received from the name resolver before attempting to connect to them. 
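The effect of ``shuffle_address_list`` can be pictured as a Fisher-Yates shuffle applied to the resolved address list before pick_first dials the addresses in order. This is only a sketch of the semantics, not the @grpc/grpc-js implementation; the import path assumes the snippet sits next to the generated file.

import type { PickFirst } from './PickFirst';

function orderAddresses(addresses: string[], config: PickFirst): string[] {
  const ordered = [...addresses];
  if (config.shuffle_address_list) {
    // Fisher-Yates shuffle so that clients spread their first connection
    // attempts across the resolved addresses.
    for (let i = ordered.length - 1; i > 0; i--) {
      const j = Math.floor(Math.random() * (i + 1));
      [ordered[i], ordered[j]] = [ordered[j], ordered[i]];
    }
  }
  return ordered; // pick_first then tries these addresses in order
}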
+ */ + 'shuffle_address_list': (boolean); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/extensions/load_balancing_policies/ring_hash/v3/RingHash.ts b/packages/grpc-js-xds/src/generated/envoy/extensions/load_balancing_policies/ring_hash/v3/RingHash.ts new file mode 100644 index 000000000..d8156fe0f --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/extensions/load_balancing_policies/ring_hash/v3/RingHash.ts @@ -0,0 +1,193 @@ +// Original file: deps/envoy-api/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.proto + +import type { UInt64Value as _google_protobuf_UInt64Value, UInt64Value__Output as _google_protobuf_UInt64Value__Output } from '../../../../../google/protobuf/UInt64Value'; +import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../../google/protobuf/UInt32Value'; +import type { ConsistentHashingLbConfig as _envoy_extensions_load_balancing_policies_common_v3_ConsistentHashingLbConfig, ConsistentHashingLbConfig__Output as _envoy_extensions_load_balancing_policies_common_v3_ConsistentHashingLbConfig__Output } from '../../../../../envoy/extensions/load_balancing_policies/common/v3/ConsistentHashingLbConfig'; +import type { _envoy_extensions_load_balancing_policies_common_v3_LocalityLbConfig_LocalityWeightedLbConfig, _envoy_extensions_load_balancing_policies_common_v3_LocalityLbConfig_LocalityWeightedLbConfig__Output } from '../../../../../envoy/extensions/load_balancing_policies/common/v3/LocalityLbConfig'; + +// Original file: deps/envoy-api/envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.proto + +/** + * The hash function used to hash hosts onto the ketama ring. + */ +export const _envoy_extensions_load_balancing_policies_ring_hash_v3_RingHash_HashFunction = { + /** + * Currently defaults to XX_HASH. + */ + DEFAULT_HASH: 'DEFAULT_HASH', + /** + * Use `xxHash `_. + */ + XX_HASH: 'XX_HASH', + /** + * Use `MurmurHash2 `_, this is compatible with + * std:hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled + * on Linux and not macOS. + */ + MURMUR_HASH_2: 'MURMUR_HASH_2', +} as const; + +/** + * The hash function used to hash hosts onto the ketama ring. + */ +export type _envoy_extensions_load_balancing_policies_ring_hash_v3_RingHash_HashFunction = + /** + * Currently defaults to XX_HASH. + */ + | 'DEFAULT_HASH' + | 0 + /** + * Use `xxHash `_. + */ + | 'XX_HASH' + | 1 + /** + * Use `MurmurHash2 `_, this is compatible with + * std:hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled + * on Linux and not macOS. + */ + | 'MURMUR_HASH_2' + | 2 + +/** + * The hash function used to hash hosts onto the ketama ring. + */ +export type _envoy_extensions_load_balancing_policies_ring_hash_v3_RingHash_HashFunction__Output = typeof _envoy_extensions_load_balancing_policies_ring_hash_v3_RingHash_HashFunction[keyof typeof _envoy_extensions_load_balancing_policies_ring_hash_v3_RingHash_HashFunction] + +/** + * This configuration allows the built-in RING_HASH LB policy to be configured via the LB policy + * extension point. See the :ref:`load balancing architecture overview + * ` for more information. + * [#next-free-field: 8] + */ +export interface RingHash { + /** + * The hash function used to hash hosts onto the ketama ring. The value defaults to + * :ref:`XX_HASH`. + */ + 'hash_function'?: (_envoy_extensions_load_balancing_policies_ring_hash_v3_RingHash_HashFunction); + /** + * Minimum hash ring size. 
The larger the ring is (that is, the more hashes there are for each + * provided host) the better the request distribution will reflect the desired weights. Defaults + * to 1024 entries, and limited to 8M entries. See also + * :ref:`maximum_ring_size`. + */ + 'minimum_ring_size'?: (_google_protobuf_UInt64Value | null); + /** + * Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered + * to further constrain resource use. See also + * :ref:`minimum_ring_size`. + */ + 'maximum_ring_size'?: (_google_protobuf_UInt64Value | null); + /** + * If set to `true`, the cluster will use hostname instead of the resolved + * address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address. + * + * ..note:: + * This is deprecated and please use :ref:`consistent_hashing_lb_config + * ` instead. + * @deprecated + */ + 'use_hostname_for_hashing'?: (boolean); + /** + * Configures percentage of average cluster load to bound per upstream host. For example, with a value of 150 + * no upstream host will get a load more than 1.5 times the average load of all the hosts in the cluster. + * If not specified, the load is not bounded for any upstream host. Typical value for this parameter is between 120 and 200. + * Minimum is 100. + * + * This is implemented based on the method described in the paper https://arxiv.org/abs/1608.01350. For the specified + * `hash_balance_factor`, requests to any upstream host are capped at `hash_balance_factor/100` times the average number of requests + * across the cluster. When a request arrives for an upstream host that is currently serving at its max capacity, linear probing + * is used to identify an eligible host. Further, the linear probe is implemented using a random jump in hosts ring/table to identify + * the eligible host (this technique is as described in the paper https://arxiv.org/abs/1908.08762 - the random jump avoids the + * cascading overflow effect when choosing the next host in the ring/table). + * + * If weights are specified on the hosts, they are respected. + * + * This is an O(N) algorithm, unlike other load balancers. Using a lower `hash_balance_factor` results in more hosts + * being probed, so use a higher value if you require better performance. + * + * ..note:: + * This is deprecated and please use :ref:`consistent_hashing_lb_config + * ` instead. + * @deprecated + */ + 'hash_balance_factor'?: (_google_protobuf_UInt32Value | null); + /** + * Common configuration for hashing-based load balancing policies. + */ + 'consistent_hashing_lb_config'?: (_envoy_extensions_load_balancing_policies_common_v3_ConsistentHashingLbConfig | null); + /** + * Enable locality weighted load balancing for ring hash lb explicitly. + */ + 'locality_weighted_lb_config'?: (_envoy_extensions_load_balancing_policies_common_v3_LocalityLbConfig_LocalityWeightedLbConfig | null); +} + +/** + * This configuration allows the built-in RING_HASH LB policy to be configured via the LB policy + * extension point. See the :ref:`load balancing architecture overview + * ` for more information. + * [#next-free-field: 8] + */ +export interface RingHash__Output { + /** + * The hash function used to hash hosts onto the ketama ring. The value defaults to + * :ref:`XX_HASH`. + */ + 'hash_function': (_envoy_extensions_load_balancing_policies_ring_hash_v3_RingHash_HashFunction__Output); + /** + * Minimum hash ring size. 
The larger the ring is (that is, the more hashes there are for each + * provided host) the better the request distribution will reflect the desired weights. Defaults + * to 1024 entries, and limited to 8M entries. See also + * :ref:`maximum_ring_size`. + */ + 'minimum_ring_size': (_google_protobuf_UInt64Value__Output | null); + /** + * Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered + * to further constrain resource use. See also + * :ref:`minimum_ring_size`. + */ + 'maximum_ring_size': (_google_protobuf_UInt64Value__Output | null); + /** + * If set to `true`, the cluster will use hostname instead of the resolved + * address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address. + * + * ..note:: + * This is deprecated and please use :ref:`consistent_hashing_lb_config + * ` instead. + * @deprecated + */ + 'use_hostname_for_hashing': (boolean); + /** + * Configures percentage of average cluster load to bound per upstream host. For example, with a value of 150 + * no upstream host will get a load more than 1.5 times the average load of all the hosts in the cluster. + * If not specified, the load is not bounded for any upstream host. Typical value for this parameter is between 120 and 200. + * Minimum is 100. + * + * This is implemented based on the method described in the paper https://arxiv.org/abs/1608.01350. For the specified + * `hash_balance_factor`, requests to any upstream host are capped at `hash_balance_factor/100` times the average number of requests + * across the cluster. When a request arrives for an upstream host that is currently serving at its max capacity, linear probing + * is used to identify an eligible host. Further, the linear probe is implemented using a random jump in hosts ring/table to identify + * the eligible host (this technique is as described in the paper https://arxiv.org/abs/1908.08762 - the random jump avoids the + * cascading overflow effect when choosing the next host in the ring/table). + * + * If weights are specified on the hosts, they are respected. + * + * This is an O(N) algorithm, unlike other load balancers. Using a lower `hash_balance_factor` results in more hosts + * being probed, so use a higher value if you require better performance. + * + * ..note:: + * This is deprecated and please use :ref:`consistent_hashing_lb_config + * ` instead. + * @deprecated + */ + 'hash_balance_factor': (_google_protobuf_UInt32Value__Output | null); + /** + * Common configuration for hashing-based load balancing policies. + */ + 'consistent_hashing_lb_config': (_envoy_extensions_load_balancing_policies_common_v3_ConsistentHashingLbConfig__Output | null); + /** + * Enable locality weighted load balancing for ring hash lb explicitly. 
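For orientation, the lookup half of a ketama-style ring can be sketched as a lower-bound binary search over ring entries sorted by hash, wrapping to the first entry when the request hash falls past the end. Ring construction (sizing between ``minimum_ring_size`` and ``maximum_ring_size``, per-host weights, and the choice of hash function) is deliberately omitted, and the names are illustrative.

interface RingEntry {
  hash: number; // precomputed hash of a host/replica pair; the ring is sorted by this field
  host: string;
}

function pickHost(ring: RingEntry[], requestHash: number): string {
  // Lower-bound binary search: first entry whose hash is >= requestHash.
  let lo = 0;
  let hi = ring.length;
  while (lo < hi) {
    const mid = (lo + hi) >>> 1;
    if (ring[mid].hash < requestHash) {
      lo = mid + 1;
    } else {
      hi = mid;
    }
  }
  return ring[lo % ring.length].host; // wrap around past the last entry
}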
+ */ + 'locality_weighted_lb_config': (_envoy_extensions_load_balancing_policies_common_v3_LocalityLbConfig_LocalityWeightedLbConfig__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/extensions/load_balancing_policies/wrr_locality/v3/WrrLocality.ts b/packages/grpc-js-xds/src/generated/envoy/extensions/load_balancing_policies/wrr_locality/v3/WrrLocality.ts new file mode 100644 index 000000000..d35fb06e5 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/extensions/load_balancing_policies/wrr_locality/v3/WrrLocality.ts @@ -0,0 +1,25 @@ +// Original file: deps/envoy-api/envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.proto + +import type { LoadBalancingPolicy as _envoy_config_cluster_v3_LoadBalancingPolicy, LoadBalancingPolicy__Output as _envoy_config_cluster_v3_LoadBalancingPolicy__Output } from '../../../../../envoy/config/cluster/v3/LoadBalancingPolicy'; + +/** + * Configuration for the wrr_locality LB policy. See the :ref:`load balancing architecture overview + * ` for more information. + */ +export interface WrrLocality { + /** + * The child LB policy to create for endpoint-picking within the chosen locality. + */ + 'endpoint_picking_policy'?: (_envoy_config_cluster_v3_LoadBalancingPolicy | null); +} + +/** + * Configuration for the wrr_locality LB policy. See the :ref:`load balancing architecture overview + * ` for more information. + */ +export interface WrrLocality__Output { + /** + * The child LB policy to create for endpoint-picking within the chosen locality. + */ + 'endpoint_picking_policy': (_envoy_config_cluster_v3_LoadBalancingPolicy__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/service/discovery/v2/AggregatedDiscoveryService.ts b/packages/grpc-js-xds/src/generated/envoy/service/discovery/v2/AggregatedDiscoveryService.ts deleted file mode 100644 index ec7641dca..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/service/discovery/v2/AggregatedDiscoveryService.ts +++ /dev/null @@ -1,52 +0,0 @@ -// Original file: deps/envoy-api/envoy/service/discovery/v2/ads.proto - -import type * as grpc from '@grpc/grpc-js' -import type { DeltaDiscoveryRequest as _envoy_api_v2_DeltaDiscoveryRequest, DeltaDiscoveryRequest__Output as _envoy_api_v2_DeltaDiscoveryRequest__Output } from '../../../../envoy/api/v2/DeltaDiscoveryRequest'; -import type { DeltaDiscoveryResponse as _envoy_api_v2_DeltaDiscoveryResponse, DeltaDiscoveryResponse__Output as _envoy_api_v2_DeltaDiscoveryResponse__Output } from '../../../../envoy/api/v2/DeltaDiscoveryResponse'; -import type { DiscoveryRequest as _envoy_api_v2_DiscoveryRequest, DiscoveryRequest__Output as _envoy_api_v2_DiscoveryRequest__Output } from '../../../../envoy/api/v2/DiscoveryRequest'; -import type { DiscoveryResponse as _envoy_api_v2_DiscoveryResponse, DiscoveryResponse__Output as _envoy_api_v2_DiscoveryResponse__Output } from '../../../../envoy/api/v2/DiscoveryResponse'; - -/** - * See https://github.com/lyft/envoy-api#apis for a description of the role of - * ADS and how it is intended to be used by a management server. ADS requests - * have the same structure as their singleton xDS counterparts, but can - * multiplex many resource types on a single stream. The type_url in the - * DiscoveryRequest/DiscoveryResponse provides sufficient information to recover - * the multiplexed singleton APIs at the Envoy instance and management server. 
- */ -export interface AggregatedDiscoveryServiceClient extends grpc.Client { - DeltaAggregatedResources(metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientDuplexStream<_envoy_api_v2_DeltaDiscoveryRequest, _envoy_api_v2_DeltaDiscoveryResponse__Output>; - DeltaAggregatedResources(options?: grpc.CallOptions): grpc.ClientDuplexStream<_envoy_api_v2_DeltaDiscoveryRequest, _envoy_api_v2_DeltaDiscoveryResponse__Output>; - deltaAggregatedResources(metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientDuplexStream<_envoy_api_v2_DeltaDiscoveryRequest, _envoy_api_v2_DeltaDiscoveryResponse__Output>; - deltaAggregatedResources(options?: grpc.CallOptions): grpc.ClientDuplexStream<_envoy_api_v2_DeltaDiscoveryRequest, _envoy_api_v2_DeltaDiscoveryResponse__Output>; - - /** - * This is a gRPC-only API. - */ - StreamAggregatedResources(metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientDuplexStream<_envoy_api_v2_DiscoveryRequest, _envoy_api_v2_DiscoveryResponse__Output>; - StreamAggregatedResources(options?: grpc.CallOptions): grpc.ClientDuplexStream<_envoy_api_v2_DiscoveryRequest, _envoy_api_v2_DiscoveryResponse__Output>; - /** - * This is a gRPC-only API. - */ - streamAggregatedResources(metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientDuplexStream<_envoy_api_v2_DiscoveryRequest, _envoy_api_v2_DiscoveryResponse__Output>; - streamAggregatedResources(options?: grpc.CallOptions): grpc.ClientDuplexStream<_envoy_api_v2_DiscoveryRequest, _envoy_api_v2_DiscoveryResponse__Output>; - -} - -/** - * See https://github.com/lyft/envoy-api#apis for a description of the role of - * ADS and how it is intended to be used by a management server. ADS requests - * have the same structure as their singleton xDS counterparts, but can - * multiplex many resource types on a single stream. The type_url in the - * DiscoveryRequest/DiscoveryResponse provides sufficient information to recover - * the multiplexed singleton APIs at the Envoy instance and management server. - */ -export interface AggregatedDiscoveryServiceHandlers extends grpc.UntypedServiceImplementation { - DeltaAggregatedResources: grpc.handleBidiStreamingCall<_envoy_api_v2_DeltaDiscoveryRequest__Output, _envoy_api_v2_DeltaDiscoveryResponse>; - - /** - * This is a gRPC-only API. 
- */ - StreamAggregatedResources: grpc.handleBidiStreamingCall<_envoy_api_v2_DiscoveryRequest__Output, _envoy_api_v2_DiscoveryResponse>; - -} diff --git a/packages/grpc-js-xds/src/generated/envoy/service/discovery/v2/AdsDummy.ts b/packages/grpc-js-xds/src/generated/envoy/service/discovery/v3/AdsDummy.ts similarity index 86% rename from packages/grpc-js-xds/src/generated/envoy/service/discovery/v2/AdsDummy.ts rename to packages/grpc-js-xds/src/generated/envoy/service/discovery/v3/AdsDummy.ts index eeb6aa6af..c15510877 100644 --- a/packages/grpc-js-xds/src/generated/envoy/service/discovery/v2/AdsDummy.ts +++ b/packages/grpc-js-xds/src/generated/envoy/service/discovery/v3/AdsDummy.ts @@ -1,4 +1,4 @@ -// Original file: deps/envoy-api/envoy/service/discovery/v2/ads.proto +// Original file: deps/envoy-api/envoy/service/discovery/v3/ads.proto /** diff --git a/packages/grpc-js-xds/src/generated/envoy/service/discovery/v3/AggregatedDiscoveryService.ts b/packages/grpc-js-xds/src/generated/envoy/service/discovery/v3/AggregatedDiscoveryService.ts new file mode 100644 index 000000000..e8d7df1f2 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/service/discovery/v3/AggregatedDiscoveryService.ts @@ -0,0 +1,58 @@ +// Original file: deps/envoy-api/envoy/service/discovery/v3/ads.proto + +import type * as grpc from '@grpc/grpc-js' +import type { MethodDefinition } from '@grpc/proto-loader' +import type { DeltaDiscoveryRequest as _envoy_service_discovery_v3_DeltaDiscoveryRequest, DeltaDiscoveryRequest__Output as _envoy_service_discovery_v3_DeltaDiscoveryRequest__Output } from '../../../../envoy/service/discovery/v3/DeltaDiscoveryRequest'; +import type { DeltaDiscoveryResponse as _envoy_service_discovery_v3_DeltaDiscoveryResponse, DeltaDiscoveryResponse__Output as _envoy_service_discovery_v3_DeltaDiscoveryResponse__Output } from '../../../../envoy/service/discovery/v3/DeltaDiscoveryResponse'; +import type { DiscoveryRequest as _envoy_service_discovery_v3_DiscoveryRequest, DiscoveryRequest__Output as _envoy_service_discovery_v3_DiscoveryRequest__Output } from '../../../../envoy/service/discovery/v3/DiscoveryRequest'; +import type { DiscoveryResponse as _envoy_service_discovery_v3_DiscoveryResponse, DiscoveryResponse__Output as _envoy_service_discovery_v3_DiscoveryResponse__Output } from '../../../../envoy/service/discovery/v3/DiscoveryResponse'; + +/** + * See https://github.com/envoyproxy/envoy-api#apis for a description of the role of + * ADS and how it is intended to be used by a management server. ADS requests + * have the same structure as their singleton xDS counterparts, but can + * multiplex many resource types on a single stream. The type_url in the + * DiscoveryRequest/DiscoveryResponse provides sufficient information to recover + * the multiplexed singleton APIs at the Envoy instance and management server. 
+ */ +export interface AggregatedDiscoveryServiceClient extends grpc.Client { + DeltaAggregatedResources(metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientDuplexStream<_envoy_service_discovery_v3_DeltaDiscoveryRequest, _envoy_service_discovery_v3_DeltaDiscoveryResponse__Output>; + DeltaAggregatedResources(options?: grpc.CallOptions): grpc.ClientDuplexStream<_envoy_service_discovery_v3_DeltaDiscoveryRequest, _envoy_service_discovery_v3_DeltaDiscoveryResponse__Output>; + deltaAggregatedResources(metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientDuplexStream<_envoy_service_discovery_v3_DeltaDiscoveryRequest, _envoy_service_discovery_v3_DeltaDiscoveryResponse__Output>; + deltaAggregatedResources(options?: grpc.CallOptions): grpc.ClientDuplexStream<_envoy_service_discovery_v3_DeltaDiscoveryRequest, _envoy_service_discovery_v3_DeltaDiscoveryResponse__Output>; + + /** + * This is a gRPC-only API. + */ + StreamAggregatedResources(metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientDuplexStream<_envoy_service_discovery_v3_DiscoveryRequest, _envoy_service_discovery_v3_DiscoveryResponse__Output>; + StreamAggregatedResources(options?: grpc.CallOptions): grpc.ClientDuplexStream<_envoy_service_discovery_v3_DiscoveryRequest, _envoy_service_discovery_v3_DiscoveryResponse__Output>; + /** + * This is a gRPC-only API. + */ + streamAggregatedResources(metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientDuplexStream<_envoy_service_discovery_v3_DiscoveryRequest, _envoy_service_discovery_v3_DiscoveryResponse__Output>; + streamAggregatedResources(options?: grpc.CallOptions): grpc.ClientDuplexStream<_envoy_service_discovery_v3_DiscoveryRequest, _envoy_service_discovery_v3_DiscoveryResponse__Output>; + +} + +/** + * See https://github.com/envoyproxy/envoy-api#apis for a description of the role of + * ADS and how it is intended to be used by a management server. ADS requests + * have the same structure as their singleton xDS counterparts, but can + * multiplex many resource types on a single stream. The type_url in the + * DiscoveryRequest/DiscoveryResponse provides sufficient information to recover + * the multiplexed singleton APIs at the Envoy instance and management server. + */ +export interface AggregatedDiscoveryServiceHandlers extends grpc.UntypedServiceImplementation { + DeltaAggregatedResources: grpc.handleBidiStreamingCall<_envoy_service_discovery_v3_DeltaDiscoveryRequest__Output, _envoy_service_discovery_v3_DeltaDiscoveryResponse>; + + /** + * This is a gRPC-only API. 
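A hedged sketch of how the client interface above might be used: load the ADS service with @grpc/proto-loader, open the StreamAggregatedResources bidirectional stream, and ACK each response by echoing its version_info and nonce. The proto path, include directories, target address, and insecure credentials are assumptions for illustration; a real xDS client also validates resources, sends node metadata, and NACKs bad updates.

import * as grpc from '@grpc/grpc-js';
import * as protoLoader from '@grpc/proto-loader';

const packageDefinition = protoLoader.loadSync('envoy/service/discovery/v3/ads.proto', {
  keepCase: true,
  includeDirs: ['deps/envoy-api'], // the other proto submodules are also needed in practice
});
const discoveryV3 = (grpc.loadPackageDefinition(packageDefinition) as any).envoy.service.discovery.v3;

const client = new discoveryV3.AggregatedDiscoveryService(
  'xds.example.com:443',
  grpc.credentials.createInsecure()
);
const call = client.StreamAggregatedResources();
call.on('data', (response: any) => {
  // ACK: repeat the type_url and echo the version and nonce just received.
  call.write({
    type_url: response.type_url,
    version_info: response.version_info,
    response_nonce: response.nonce,
  });
});
// Initial request: subscribe to all Listener resources for this node.
call.write({ type_url: 'type.googleapis.com/envoy.config.listener.v3.Listener' });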
+ */ + StreamAggregatedResources: grpc.handleBidiStreamingCall<_envoy_service_discovery_v3_DiscoveryRequest__Output, _envoy_service_discovery_v3_DiscoveryResponse>; + +} + +export interface AggregatedDiscoveryServiceDefinition extends grpc.ServiceDefinition { + DeltaAggregatedResources: MethodDefinition<_envoy_service_discovery_v3_DeltaDiscoveryRequest, _envoy_service_discovery_v3_DeltaDiscoveryResponse, _envoy_service_discovery_v3_DeltaDiscoveryRequest__Output, _envoy_service_discovery_v3_DeltaDiscoveryResponse__Output> + StreamAggregatedResources: MethodDefinition<_envoy_service_discovery_v3_DiscoveryRequest, _envoy_service_discovery_v3_DiscoveryResponse, _envoy_service_discovery_v3_DiscoveryRequest__Output, _envoy_service_discovery_v3_DiscoveryResponse__Output> +} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/DeltaDiscoveryRequest.ts b/packages/grpc-js-xds/src/generated/envoy/service/discovery/v3/DeltaDiscoveryRequest.ts similarity index 75% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/DeltaDiscoveryRequest.ts rename to packages/grpc-js-xds/src/generated/envoy/service/discovery/v3/DeltaDiscoveryRequest.ts index 11cbe8c2c..6c1a3cd95 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/DeltaDiscoveryRequest.ts +++ b/packages/grpc-js-xds/src/generated/envoy/service/discovery/v3/DeltaDiscoveryRequest.ts @@ -1,7 +1,8 @@ -// Original file: deps/envoy-api/envoy/api/v2/discovery.proto +// Original file: deps/envoy-api/envoy/service/discovery/v3/discovery.proto -import type { Node as _envoy_api_v2_core_Node, Node__Output as _envoy_api_v2_core_Node__Output } from '../../../envoy/api/v2/core/Node'; -import type { Status as _google_rpc_Status, Status__Output as _google_rpc_Status__Output } from '../../../google/rpc/Status'; +import type { Node as _envoy_config_core_v3_Node, Node__Output as _envoy_config_core_v3_Node__Output } from '../../../../envoy/config/core/v3/Node'; +import type { Status as _google_rpc_Status, Status__Output as _google_rpc_Status__Output } from '../../../../google/rpc/Status'; +import type { ResourceLocator as _envoy_service_discovery_v3_ResourceLocator, ResourceLocator__Output as _envoy_service_discovery_v3_ResourceLocator__Output } from '../../../../envoy/service/discovery/v3/ResourceLocator'; /** * DeltaDiscoveryRequest and DeltaDiscoveryResponse are used in a new gRPC @@ -36,16 +37,18 @@ import type { Status as _google_rpc_Status, Status__Output as _google_rpc_Status * In particular, initial_resource_versions being sent at the "start" of every * gRPC stream actually entails a message for each type_url, each with its own * initial_resource_versions. - * [#next-free-field: 8] + * [#next-free-field: 10] */ export interface DeltaDiscoveryRequest { /** * The node making the request. */ - 'node'?: (_envoy_api_v2_core_Node); + 'node'?: (_envoy_config_core_v3_Node | null); /** * Type of the resource that is being requested, e.g. - * "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". + * ``type.googleapis.com/envoy.api.v2.ClusterLoadAssignment``. This does not need to be set if + * resources are only referenced via ``xds_resource_subscribe`` and + * ``xds_resources_unsubscribe``. */ 'type_url'?: (string); /** @@ -95,11 +98,27 @@ export interface DeltaDiscoveryRequest { */ 'response_nonce'?: (string); /** - * This is populated when the previous :ref:`DiscoveryResponse ` - * failed to update configuration. 
The *message* field in *error_details* + * This is populated when the previous :ref:`DiscoveryResponse ` + * failed to update configuration. The ``message`` field in ``error_details`` * provides the Envoy internal exception related to the failure. */ - 'error_detail'?: (_google_rpc_Status); + 'error_detail'?: (_google_rpc_Status | null); + /** + * [#not-implemented-hide:] + * Alternative to ``resource_names_subscribe`` field that allows specifying dynamic parameters + * along with each resource name. + * Note that it is legal for a request to have some resources listed + * in ``resource_names_subscribe`` and others in ``resource_locators_subscribe``. + */ + 'resource_locators_subscribe'?: (_envoy_service_discovery_v3_ResourceLocator)[]; + /** + * [#not-implemented-hide:] + * Alternative to ``resource_names_unsubscribe`` field that allows specifying dynamic parameters + * along with each resource name. + * Note that it is legal for a request to have some resources listed + * in ``resource_names_unsubscribe`` and others in ``resource_locators_unsubscribe``. + */ + 'resource_locators_unsubscribe'?: (_envoy_service_discovery_v3_ResourceLocator)[]; } /** @@ -135,16 +154,18 @@ export interface DeltaDiscoveryRequest { * In particular, initial_resource_versions being sent at the "start" of every * gRPC stream actually entails a message for each type_url, each with its own * initial_resource_versions. - * [#next-free-field: 8] + * [#next-free-field: 10] */ export interface DeltaDiscoveryRequest__Output { /** * The node making the request. */ - 'node'?: (_envoy_api_v2_core_Node__Output); + 'node': (_envoy_config_core_v3_Node__Output | null); /** * Type of the resource that is being requested, e.g. - * "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". + * ``type.googleapis.com/envoy.api.v2.ClusterLoadAssignment``. This does not need to be set if + * resources are only referenced via ``xds_resource_subscribe`` and + * ``xds_resources_unsubscribe``. */ 'type_url': (string); /** @@ -194,9 +215,25 @@ export interface DeltaDiscoveryRequest__Output { */ 'response_nonce': (string); /** - * This is populated when the previous :ref:`DiscoveryResponse ` - * failed to update configuration. The *message* field in *error_details* + * This is populated when the previous :ref:`DiscoveryResponse ` + * failed to update configuration. The ``message`` field in ``error_details`` * provides the Envoy internal exception related to the failure. */ - 'error_detail'?: (_google_rpc_Status__Output); + 'error_detail': (_google_rpc_Status__Output | null); + /** + * [#not-implemented-hide:] + * Alternative to ``resource_names_subscribe`` field that allows specifying dynamic parameters + * along with each resource name. + * Note that it is legal for a request to have some resources listed + * in ``resource_names_subscribe`` and others in ``resource_locators_subscribe``. + */ + 'resource_locators_subscribe': (_envoy_service_discovery_v3_ResourceLocator__Output)[]; + /** + * [#not-implemented-hide:] + * Alternative to ``resource_names_unsubscribe`` field that allows specifying dynamic parameters + * along with each resource name. + * Note that it is legal for a request to have some resources listed + * in ``resource_names_unsubscribe`` and others in ``resource_locators_unsubscribe``. 
+ */ + 'resource_locators_unsubscribe': (_envoy_service_discovery_v3_ResourceLocator__Output)[]; } diff --git a/packages/grpc-js-xds/src/generated/envoy/service/discovery/v3/DeltaDiscoveryResponse.ts b/packages/grpc-js-xds/src/generated/envoy/service/discovery/v3/DeltaDiscoveryResponse.ts new file mode 100644 index 000000000..0728140ee --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/service/discovery/v3/DeltaDiscoveryResponse.ts @@ -0,0 +1,87 @@ +// Original file: deps/envoy-api/envoy/service/discovery/v3/discovery.proto + +import type { Resource as _envoy_service_discovery_v3_Resource, Resource__Output as _envoy_service_discovery_v3_Resource__Output } from '../../../../envoy/service/discovery/v3/Resource'; +import type { ControlPlane as _envoy_config_core_v3_ControlPlane, ControlPlane__Output as _envoy_config_core_v3_ControlPlane__Output } from '../../../../envoy/config/core/v3/ControlPlane'; +import type { ResourceName as _envoy_service_discovery_v3_ResourceName, ResourceName__Output as _envoy_service_discovery_v3_ResourceName__Output } from '../../../../envoy/service/discovery/v3/ResourceName'; + +/** + * [#next-free-field: 9] + */ +export interface DeltaDiscoveryResponse { + /** + * The version of the response data (used for debugging). + */ + 'system_version_info'?: (string); + /** + * The response resources. These are typed resources, whose types must match + * the type_url field. + */ + 'resources'?: (_envoy_service_discovery_v3_Resource)[]; + /** + * Type URL for resources. Identifies the xDS API when muxing over ADS. + * Must be consistent with the type_url in the Any within 'resources' if 'resources' is non-empty. + */ + 'type_url'?: (string); + /** + * The nonce provides a way for DeltaDiscoveryRequests to uniquely + * reference a DeltaDiscoveryResponse when (N)ACKing. The nonce is required. + */ + 'nonce'?: (string); + /** + * Resources names of resources that have be deleted and to be removed from the xDS Client. + * Removed resources for missing resources can be ignored. + */ + 'removed_resources'?: (string)[]; + /** + * [#not-implemented-hide:] + * The control plane instance that sent the response. + */ + 'control_plane'?: (_envoy_config_core_v3_ControlPlane | null); + /** + * Alternative to removed_resources that allows specifying which variant of + * a resource is being removed. This variant must be used for any resource + * for which dynamic parameter constraints were sent to the client. + */ + 'removed_resource_names'?: (_envoy_service_discovery_v3_ResourceName)[]; +} + +/** + * [#next-free-field: 9] + */ +export interface DeltaDiscoveryResponse__Output { + /** + * The version of the response data (used for debugging). + */ + 'system_version_info': (string); + /** + * The response resources. These are typed resources, whose types must match + * the type_url field. + */ + 'resources': (_envoy_service_discovery_v3_Resource__Output)[]; + /** + * Type URL for resources. Identifies the xDS API when muxing over ADS. + * Must be consistent with the type_url in the Any within 'resources' if 'resources' is non-empty. + */ + 'type_url': (string); + /** + * The nonce provides a way for DeltaDiscoveryRequests to uniquely + * reference a DeltaDiscoveryResponse when (N)ACKing. The nonce is required. + */ + 'nonce': (string); + /** + * Resources names of resources that have be deleted and to be removed from the xDS Client. + * Removed resources for missing resources can be ignored. 
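One way to picture how these response fields are consumed: a client-side cache upserts every entry in ``resources``, drops everything named in ``removed_resources``, and then ACKs with the nonce. A minimal sketch with a Map-backed cache follows; the import path and names are assumptions.

import type { DeltaDiscoveryResponse__Output } from './DeltaDiscoveryResponse';

function applyDeltaResponse(
  cache: Map<string, unknown>,
  response: DeltaDiscoveryResponse__Output
): string {
  for (const resource of response.resources) {
    if (resource.name) {
      cache.set(resource.name, resource.resource); // upsert by resource name
    }
  }
  for (const removedName of response.removed_resources) {
    cache.delete(removedName);
  }
  return response.nonce; // echo this as response_nonce in the next DeltaDiscoveryRequest
}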
+ */ + 'removed_resources': (string)[]; + /** + * [#not-implemented-hide:] + * The control plane instance that sent the response. + */ + 'control_plane': (_envoy_config_core_v3_ControlPlane__Output | null); + /** + * Alternative to removed_resources that allows specifying which variant of + * a resource is being removed. This variant must be used for any resource + * for which dynamic parameter constraints were sent to the client. + */ + 'removed_resource_names': (_envoy_service_discovery_v3_ResourceName__Output)[]; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/DiscoveryRequest.ts b/packages/grpc-js-xds/src/generated/envoy/service/discovery/v3/DiscoveryRequest.ts similarity index 68% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/DiscoveryRequest.ts rename to packages/grpc-js-xds/src/generated/envoy/service/discovery/v3/DiscoveryRequest.ts index 2150c41bc..95f1299cb 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/DiscoveryRequest.ts +++ b/packages/grpc-js-xds/src/generated/envoy/service/discovery/v3/DiscoveryRequest.ts @@ -1,12 +1,13 @@ -// Original file: deps/envoy-api/envoy/api/v2/discovery.proto +// Original file: deps/envoy-api/envoy/service/discovery/v3/discovery.proto -import type { Node as _envoy_api_v2_core_Node, Node__Output as _envoy_api_v2_core_Node__Output } from '../../../envoy/api/v2/core/Node'; -import type { Status as _google_rpc_Status, Status__Output as _google_rpc_Status__Output } from '../../../google/rpc/Status'; +import type { Node as _envoy_config_core_v3_Node, Node__Output as _envoy_config_core_v3_Node__Output } from '../../../../envoy/config/core/v3/Node'; +import type { Status as _google_rpc_Status, Status__Output as _google_rpc_Status__Output } from '../../../../google/rpc/Status'; +import type { ResourceLocator as _envoy_service_discovery_v3_ResourceLocator, ResourceLocator__Output as _envoy_service_discovery_v3_ResourceLocator__Output } from '../../../../envoy/service/discovery/v3/ResourceLocator'; /** * A DiscoveryRequest requests a set of versioned resources of the same type for * a given Envoy node on some API. - * [#next-free-field: 7] + * [#next-free-field: 8] */ export interface DiscoveryRequest { /** @@ -22,7 +23,7 @@ export interface DiscoveryRequest { /** * The node making the request. */ - 'node'?: (_envoy_api_v2_core_Node); + 'node'?: (_envoy_config_core_v3_Node | null); /** * List of resources to subscribe to, e.g. list of cluster names or a route * configuration name. If this is empty, all resources for the API are @@ -48,18 +49,28 @@ export interface DiscoveryRequest { */ 'response_nonce'?: (string); /** - * This is populated when the previous :ref:`DiscoveryResponse ` - * failed to update configuration. The *message* field in *error_details* provides the Envoy + * This is populated when the previous :ref:`DiscoveryResponse ` + * failed to update configuration. The ``message`` field in ``error_details`` provides the Envoy * internal exception related to the failure. It is only intended for consumption during manual * debugging, the string provided is not guaranteed to be stable across Envoy versions. */ - 'error_detail'?: (_google_rpc_Status); + 'error_detail'?: (_google_rpc_Status | null); + /** + * [#not-implemented-hide:] + * Alternative to ``resource_names`` field that allows specifying dynamic + * parameters along with each resource name. Clients that populate this + * field must be able to handle responses from the server where resources + * are wrapped in a Resource message. 
+ * Note that it is legal for a request to have some resources listed + * in ``resource_names`` and others in ``resource_locators``. + */ + 'resource_locators'?: (_envoy_service_discovery_v3_ResourceLocator)[]; } /** * A DiscoveryRequest requests a set of versioned resources of the same type for * a given Envoy node on some API. - * [#next-free-field: 7] + * [#next-free-field: 8] */ export interface DiscoveryRequest__Output { /** @@ -75,7 +86,7 @@ export interface DiscoveryRequest__Output { /** * The node making the request. */ - 'node'?: (_envoy_api_v2_core_Node__Output); + 'node': (_envoy_config_core_v3_Node__Output | null); /** * List of resources to subscribe to, e.g. list of cluster names or a route * configuration name. If this is empty, all resources for the API are @@ -101,10 +112,20 @@ export interface DiscoveryRequest__Output { */ 'response_nonce': (string); /** - * This is populated when the previous :ref:`DiscoveryResponse ` - * failed to update configuration. The *message* field in *error_details* provides the Envoy + * This is populated when the previous :ref:`DiscoveryResponse ` + * failed to update configuration. The ``message`` field in ``error_details`` provides the Envoy * internal exception related to the failure. It is only intended for consumption during manual * debugging, the string provided is not guaranteed to be stable across Envoy versions. */ - 'error_detail'?: (_google_rpc_Status__Output); + 'error_detail': (_google_rpc_Status__Output | null); + /** + * [#not-implemented-hide:] + * Alternative to ``resource_names`` field that allows specifying dynamic + * parameters along with each resource name. Clients that populate this + * field must be able to handle responses from the server where resources + * are wrapped in a Resource message. + * Note that it is legal for a request to have some resources listed + * in ``resource_names`` and others in ``resource_locators``. + */ + 'resource_locators': (_envoy_service_discovery_v3_ResourceLocator__Output)[]; } diff --git a/packages/grpc-js-xds/src/generated/envoy/api/v2/DiscoveryResponse.ts b/packages/grpc-js-xds/src/generated/envoy/service/discovery/v3/DiscoveryResponse.ts similarity index 89% rename from packages/grpc-js-xds/src/generated/envoy/api/v2/DiscoveryResponse.ts rename to packages/grpc-js-xds/src/generated/envoy/service/discovery/v3/DiscoveryResponse.ts index dd7e70d4d..874168317 100644 --- a/packages/grpc-js-xds/src/generated/envoy/api/v2/DiscoveryResponse.ts +++ b/packages/grpc-js-xds/src/generated/envoy/service/discovery/v3/DiscoveryResponse.ts @@ -1,7 +1,7 @@ -// Original file: deps/envoy-api/envoy/api/v2/discovery.proto +// Original file: deps/envoy-api/envoy/service/discovery/v3/discovery.proto -import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../google/protobuf/Any'; -import type { ControlPlane as _envoy_api_v2_core_ControlPlane, ControlPlane__Output as _envoy_api_v2_core_ControlPlane__Output } from '../../../envoy/api/v2/core/ControlPlane'; +import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; +import type { ControlPlane as _envoy_config_core_v3_ControlPlane, ControlPlane__Output as _envoy_config_core_v3_ControlPlane__Output } from '../../../../envoy/config/core/v3/ControlPlane'; /** * [#next-free-field: 7] @@ -49,10 +49,9 @@ export interface DiscoveryResponse { */ 'nonce'?: (string); /** - * [#not-implemented-hide:] * The control plane instance that sent the response. 
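The state-of-the-world variant follows the same ACK pattern: after applying a DiscoveryResponse, the client sends a DiscoveryRequest that repeats the type_url and echoes the response's version_info and nonce. A hedged sketch with placeholder values:

function buildSotwAck(typeUrl: string, versionInfo: string, nonce: string, resourceNames: string[]) {
  return {
    type_url: typeUrl,
    version_info: versionInfo,     // version of the response being ACKed
    response_nonce: nonce,         // nonce of the response being ACKed
    resource_names: resourceNames, // the full set of resources still subscribed to
  };
}

// A NACK instead keeps the previously ACKed version_info and populates error_detail.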
*/ - 'control_plane'?: (_envoy_api_v2_core_ControlPlane); + 'control_plane'?: (_envoy_config_core_v3_ControlPlane | null); } /** @@ -101,8 +100,7 @@ export interface DiscoveryResponse__Output { */ 'nonce': (string); /** - * [#not-implemented-hide:] * The control plane instance that sent the response. */ - 'control_plane'?: (_envoy_api_v2_core_ControlPlane__Output); + 'control_plane': (_envoy_config_core_v3_ControlPlane__Output | null); } diff --git a/packages/grpc-js-xds/src/generated/envoy/service/discovery/v3/DynamicParameterConstraints.ts b/packages/grpc-js-xds/src/generated/envoy/service/discovery/v3/DynamicParameterConstraints.ts new file mode 100644 index 000000000..5bba10719 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/service/discovery/v3/DynamicParameterConstraints.ts @@ -0,0 +1,119 @@ +// Original file: deps/envoy-api/envoy/service/discovery/v3/discovery.proto + +import type { DynamicParameterConstraints as _envoy_service_discovery_v3_DynamicParameterConstraints, DynamicParameterConstraints__Output as _envoy_service_discovery_v3_DynamicParameterConstraints__Output } from '../../../../envoy/service/discovery/v3/DynamicParameterConstraints'; + +export interface _envoy_service_discovery_v3_DynamicParameterConstraints_ConstraintList { + 'constraints'?: (_envoy_service_discovery_v3_DynamicParameterConstraints)[]; +} + +export interface _envoy_service_discovery_v3_DynamicParameterConstraints_ConstraintList__Output { + 'constraints': (_envoy_service_discovery_v3_DynamicParameterConstraints__Output)[]; +} + +export interface _envoy_service_discovery_v3_DynamicParameterConstraints_SingleConstraint_Exists { +} + +export interface _envoy_service_discovery_v3_DynamicParameterConstraints_SingleConstraint_Exists__Output { +} + +/** + * A single constraint for a given key. + */ +export interface _envoy_service_discovery_v3_DynamicParameterConstraints_SingleConstraint { + /** + * The key to match against. + */ + 'key'?: (string); + /** + * Matches this exact value. + */ + 'value'?: (string); + /** + * Key is present (matches any value except for the key being absent). + * This allows setting a default constraint for clients that do + * not send a key at all, while there may be other clients that need + * special configuration based on that key. + */ + 'exists'?: (_envoy_service_discovery_v3_DynamicParameterConstraints_SingleConstraint_Exists | null); + 'constraint_type'?: "value"|"exists"; +} + +/** + * A single constraint for a given key. + */ +export interface _envoy_service_discovery_v3_DynamicParameterConstraints_SingleConstraint__Output { + /** + * The key to match against. + */ + 'key': (string); + /** + * Matches this exact value. + */ + 'value'?: (string); + /** + * Key is present (matches any value except for the key being absent). + * This allows setting a default constraint for clients that do + * not send a key at all, while there may be other clients that need + * special configuration based on that key. + */ + 'exists'?: (_envoy_service_discovery_v3_DynamicParameterConstraints_SingleConstraint_Exists__Output | null); + 'constraint_type': "value"|"exists"; +} + +/** + * A set of dynamic parameter constraints associated with a variant of an individual xDS resource. + * These constraints determine whether the resource matches a subscription based on the set of + * dynamic parameters in the subscription, as specified in the + * :ref:`ResourceLocator.dynamic_parameters` + * field. 
This allows xDS implementations (clients, servers, and caching proxies) to determine + * which variant of a resource is appropriate for a given client. + */ +export interface DynamicParameterConstraints { + /** + * A single constraint to evaluate. + */ + 'constraint'?: (_envoy_service_discovery_v3_DynamicParameterConstraints_SingleConstraint | null); + /** + * A list of constraints that match if any one constraint in the list + * matches. + */ + 'or_constraints'?: (_envoy_service_discovery_v3_DynamicParameterConstraints_ConstraintList | null); + /** + * A list of constraints that must all match. + */ + 'and_constraints'?: (_envoy_service_discovery_v3_DynamicParameterConstraints_ConstraintList | null); + /** + * The inverse (NOT) of a set of constraints. + */ + 'not_constraints'?: (_envoy_service_discovery_v3_DynamicParameterConstraints | null); + 'type'?: "constraint"|"or_constraints"|"and_constraints"|"not_constraints"; +} + +/** + * A set of dynamic parameter constraints associated with a variant of an individual xDS resource. + * These constraints determine whether the resource matches a subscription based on the set of + * dynamic parameters in the subscription, as specified in the + * :ref:`ResourceLocator.dynamic_parameters` + * field. This allows xDS implementations (clients, servers, and caching proxies) to determine + * which variant of a resource is appropriate for a given client. + */ +export interface DynamicParameterConstraints__Output { + /** + * A single constraint to evaluate. + */ + 'constraint'?: (_envoy_service_discovery_v3_DynamicParameterConstraints_SingleConstraint__Output | null); + /** + * A list of constraints that match if any one constraint in the list + * matches. + */ + 'or_constraints'?: (_envoy_service_discovery_v3_DynamicParameterConstraints_ConstraintList__Output | null); + /** + * A list of constraints that must all match. + */ + 'and_constraints'?: (_envoy_service_discovery_v3_DynamicParameterConstraints_ConstraintList__Output | null); + /** + * The inverse (NOT) of a set of constraints. + */ + 'not_constraints'?: (_envoy_service_discovery_v3_DynamicParameterConstraints__Output | null); + 'type': "constraint"|"or_constraints"|"and_constraints"|"not_constraints"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/service/discovery/v3/Resource.ts b/packages/grpc-js-xds/src/generated/envoy/service/discovery/v3/Resource.ts new file mode 100644 index 000000000..6bef71ff8 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/service/discovery/v3/Resource.ts @@ -0,0 +1,146 @@ +// Original file: deps/envoy-api/envoy/service/discovery/v3/discovery.proto + +import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; +import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../../google/protobuf/Duration'; +import type { ResourceName as _envoy_service_discovery_v3_ResourceName, ResourceName__Output as _envoy_service_discovery_v3_ResourceName__Output } from '../../../../envoy/service/discovery/v3/ResourceName'; +import type { Metadata as _envoy_config_core_v3_Metadata, Metadata__Output as _envoy_config_core_v3_Metadata__Output } from '../../../../envoy/config/core/v3/Metadata'; + +/** + * Cache control properties for the resource. + * [#not-implemented-hide:] + */ +export interface _envoy_service_discovery_v3_Resource_CacheControl { + /** + * If true, xDS proxies may not cache this resource. 
+ * Note that this does not apply to clients other than xDS proxies, which must cache resources + * for their own use, regardless of the value of this field. + */ + 'do_not_cache'?: (boolean); +} + +/** + * Cache control properties for the resource. + * [#not-implemented-hide:] + */ +export interface _envoy_service_discovery_v3_Resource_CacheControl__Output { + /** + * If true, xDS proxies may not cache this resource. + * Note that this does not apply to clients other than xDS proxies, which must cache resources + * for their own use, regardless of the value of this field. + */ + 'do_not_cache': (boolean); +} + +/** + * [#next-free-field: 10] + */ +export interface Resource { + /** + * The resource level version. It allows xDS to track the state of individual + * resources. + */ + 'version'?: (string); + /** + * The resource being tracked. + */ + 'resource'?: (_google_protobuf_Any | null); + /** + * The resource's name, to distinguish it from others of the same type of resource. + * Only one of ``name`` or ``resource_name`` may be set. + */ + 'name'?: (string); + /** + * The aliases are a list of other names that this resource can go by. + */ + 'aliases'?: (string)[]; + /** + * Time-to-live value for the resource. For each resource, a timer is started. The timer is + * reset each time the resource is received with a new TTL. If the resource is received with + * no TTL set, the timer is removed for the resource. Upon expiration of the timer, the + * configuration for the resource will be removed. + * + * The TTL can be refreshed or changed by sending a response that doesn't change the resource + * version. In this case the resource field does not need to be populated, which allows for + * light-weight "heartbeat" updates to keep a resource with a TTL alive. + * + * The TTL feature is meant to support configurations that should be removed in the event of + * a management server failure. For example, the feature may be used for fault injection + * testing where the fault injection should be terminated in the event that Envoy loses contact + * with the management server. + */ + 'ttl'?: (_google_protobuf_Duration | null); + /** + * Cache control properties for the resource. + * [#not-implemented-hide:] + */ + 'cache_control'?: (_envoy_service_discovery_v3_Resource_CacheControl | null); + /** + * Alternative to the ``name`` field, to be used when the server supports + * multiple variants of the named resource that are differentiated by + * dynamic parameter constraints. + * Only one of ``name`` or ``resource_name`` may be set. + */ + 'resource_name'?: (_envoy_service_discovery_v3_ResourceName | null); + /** + * The Metadata field can be used to provide additional information for the resource. + * E.g. the trace data for debugging. + */ + 'metadata'?: (_envoy_config_core_v3_Metadata | null); +} + +/** + * [#next-free-field: 10] + */ +export interface Resource__Output { + /** + * The resource level version. It allows xDS to track the state of individual + * resources. + */ + 'version': (string); + /** + * The resource being tracked. + */ + 'resource': (_google_protobuf_Any__Output | null); + /** + * The resource's name, to distinguish it from others of the same type of resource. + * Only one of ``name`` or ``resource_name`` may be set. + */ + 'name': (string); + /** + * The aliases are a list of other names that this resource can go by. + */ + 'aliases': (string)[]; + /** + * Time-to-live value for the resource. For each resource, a timer is started. 
The timer is + * reset each time the resource is received with a new TTL. If the resource is received with + * no TTL set, the timer is removed for the resource. Upon expiration of the timer, the + * configuration for the resource will be removed. + * + * The TTL can be refreshed or changed by sending a response that doesn't change the resource + * version. In this case the resource field does not need to be populated, which allows for + * light-weight "heartbeat" updates to keep a resource with a TTL alive. + * + * The TTL feature is meant to support configurations that should be removed in the event of + * a management server failure. For example, the feature may be used for fault injection + * testing where the fault injection should be terminated in the event that Envoy loses contact + * with the management server. + */ + 'ttl': (_google_protobuf_Duration__Output | null); + /** + * Cache control properties for the resource. + * [#not-implemented-hide:] + */ + 'cache_control': (_envoy_service_discovery_v3_Resource_CacheControl__Output | null); + /** + * Alternative to the ``name`` field, to be used when the server supports + * multiple variants of the named resource that are differentiated by + * dynamic parameter constraints. + * Only one of ``name`` or ``resource_name`` may be set. + */ + 'resource_name': (_envoy_service_discovery_v3_ResourceName__Output | null); + /** + * The Metadata field can be used to provide additional information for the resource. + * E.g. the trace data for debugging. + */ + 'metadata': (_envoy_config_core_v3_Metadata__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/service/discovery/v3/ResourceLocator.ts b/packages/grpc-js-xds/src/generated/envoy/service/discovery/v3/ResourceLocator.ts new file mode 100644 index 000000000..c37dc68d3 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/service/discovery/v3/ResourceLocator.ts @@ -0,0 +1,34 @@ +// Original file: deps/envoy-api/envoy/service/discovery/v3/discovery.proto + + +/** + * Specifies a resource to be subscribed to. + */ +export interface ResourceLocator { + /** + * The resource name to subscribe to. + */ + 'name'?: (string); + /** + * A set of dynamic parameters used to match against the dynamic parameter + * constraints on the resource. This allows clients to select between + * multiple variants of the same resource. + */ + 'dynamic_parameters'?: ({[key: string]: string}); +} + +/** + * Specifies a resource to be subscribed to. + */ +export interface ResourceLocator__Output { + /** + * The resource name to subscribe to. + */ + 'name': (string); + /** + * A set of dynamic parameters used to match against the dynamic parameter + * constraints on the resource. This allows clients to select between + * multiple variants of the same resource. 
+ */ + 'dynamic_parameters': ({[key: string]: string}); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/service/discovery/v3/ResourceName.ts b/packages/grpc-js-xds/src/generated/envoy/service/discovery/v3/ResourceName.ts new file mode 100644 index 000000000..da7f4fd8a --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/service/discovery/v3/ResourceName.ts @@ -0,0 +1,33 @@ +// Original file: deps/envoy-api/envoy/service/discovery/v3/discovery.proto + +import type { DynamicParameterConstraints as _envoy_service_discovery_v3_DynamicParameterConstraints, DynamicParameterConstraints__Output as _envoy_service_discovery_v3_DynamicParameterConstraints__Output } from '../../../../envoy/service/discovery/v3/DynamicParameterConstraints'; + +/** + * Specifies a concrete resource name. + */ +export interface ResourceName { + /** + * The name of the resource. + */ + 'name'?: (string); + /** + * Dynamic parameter constraints associated with this resource. To be used by client-side caches + * (including xDS proxies) when matching subscribed resource locators. + */ + 'dynamic_parameter_constraints'?: (_envoy_service_discovery_v3_DynamicParameterConstraints | null); +} + +/** + * Specifies a concrete resource name. + */ +export interface ResourceName__Output { + /** + * The name of the resource. + */ + 'name': (string); + /** + * Dynamic parameter constraints associated with this resource. To be used by client-side caches + * (including xDS proxies) when matching subscribed resource locators. + */ + 'dynamic_parameter_constraints': (_envoy_service_discovery_v3_DynamicParameterConstraints__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/service/load_stats/v2/LoadStatsRequest.ts b/packages/grpc-js-xds/src/generated/envoy/service/load_stats/v2/LoadStatsRequest.ts deleted file mode 100644 index a7ac6b8e1..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/service/load_stats/v2/LoadStatsRequest.ts +++ /dev/null @@ -1,34 +0,0 @@ -// Original file: deps/envoy-api/envoy/service/load_stats/v2/lrs.proto - -import type { Node as _envoy_api_v2_core_Node, Node__Output as _envoy_api_v2_core_Node__Output } from '../../../../envoy/api/v2/core/Node'; -import type { ClusterStats as _envoy_api_v2_endpoint_ClusterStats, ClusterStats__Output as _envoy_api_v2_endpoint_ClusterStats__Output } from '../../../../envoy/api/v2/endpoint/ClusterStats'; - -/** - * A load report Envoy sends to the management server. - * [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. - */ -export interface LoadStatsRequest { - /** - * Node identifier for Envoy instance. - */ - 'node'?: (_envoy_api_v2_core_Node); - /** - * A list of load stats to report. - */ - 'cluster_stats'?: (_envoy_api_v2_endpoint_ClusterStats)[]; -} - -/** - * A load report Envoy sends to the management server. - * [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. - */ -export interface LoadStatsRequest__Output { - /** - * Node identifier for Envoy instance. - */ - 'node'?: (_envoy_api_v2_core_Node__Output); - /** - * A list of load stats to report. 
- */ - 'cluster_stats': (_envoy_api_v2_endpoint_ClusterStats__Output)[]; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/service/load_stats/v2/LoadReportingService.ts b/packages/grpc-js-xds/src/generated/envoy/service/load_stats/v3/LoadReportingService.ts similarity index 81% rename from packages/grpc-js-xds/src/generated/envoy/service/load_stats/v2/LoadReportingService.ts rename to packages/grpc-js-xds/src/generated/envoy/service/load_stats/v3/LoadReportingService.ts index 41e73f0e9..32aa4f96e 100644 --- a/packages/grpc-js-xds/src/generated/envoy/service/load_stats/v2/LoadReportingService.ts +++ b/packages/grpc-js-xds/src/generated/envoy/service/load_stats/v3/LoadReportingService.ts @@ -1,8 +1,9 @@ -// Original file: deps/envoy-api/envoy/service/load_stats/v2/lrs.proto +// Original file: deps/envoy-api/envoy/service/load_stats/v3/lrs.proto import type * as grpc from '@grpc/grpc-js' -import type { LoadStatsRequest as _envoy_service_load_stats_v2_LoadStatsRequest, LoadStatsRequest__Output as _envoy_service_load_stats_v2_LoadStatsRequest__Output } from '../../../../envoy/service/load_stats/v2/LoadStatsRequest'; -import type { LoadStatsResponse as _envoy_service_load_stats_v2_LoadStatsResponse, LoadStatsResponse__Output as _envoy_service_load_stats_v2_LoadStatsResponse__Output } from '../../../../envoy/service/load_stats/v2/LoadStatsResponse'; +import type { MethodDefinition } from '@grpc/proto-loader' +import type { LoadStatsRequest as _envoy_service_load_stats_v3_LoadStatsRequest, LoadStatsRequest__Output as _envoy_service_load_stats_v3_LoadStatsRequest__Output } from '../../../../envoy/service/load_stats/v3/LoadStatsRequest'; +import type { LoadStatsResponse as _envoy_service_load_stats_v3_LoadStatsResponse, LoadStatsResponse__Output as _envoy_service_load_stats_v3_LoadStatsResponse__Output } from '../../../../envoy/service/load_stats/v3/LoadStatsResponse'; export interface LoadReportingServiceClient extends grpc.Client { /** @@ -35,8 +36,8 @@ export interface LoadReportingServiceClient extends grpc.Client { * from around the world, computes global assignment and prepares traffic * assignment destined for each zone Envoys are located in. Goto 2. */ - StreamLoadStats(metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientDuplexStream<_envoy_service_load_stats_v2_LoadStatsRequest, _envoy_service_load_stats_v2_LoadStatsResponse__Output>; - StreamLoadStats(options?: grpc.CallOptions): grpc.ClientDuplexStream<_envoy_service_load_stats_v2_LoadStatsRequest, _envoy_service_load_stats_v2_LoadStatsResponse__Output>; + StreamLoadStats(metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientDuplexStream<_envoy_service_load_stats_v3_LoadStatsRequest, _envoy_service_load_stats_v3_LoadStatsResponse__Output>; + StreamLoadStats(options?: grpc.CallOptions): grpc.ClientDuplexStream<_envoy_service_load_stats_v3_LoadStatsRequest, _envoy_service_load_stats_v3_LoadStatsResponse__Output>; /** * Advanced API to allow for multi-dimensional load balancing by remote * server. For receiving LB assignments, the steps are: @@ -67,8 +68,8 @@ export interface LoadReportingServiceClient extends grpc.Client { * from around the world, computes global assignment and prepares traffic * assignment destined for each zone Envoys are located in. Goto 2. 
*/ - streamLoadStats(metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientDuplexStream<_envoy_service_load_stats_v2_LoadStatsRequest, _envoy_service_load_stats_v2_LoadStatsResponse__Output>; - streamLoadStats(options?: grpc.CallOptions): grpc.ClientDuplexStream<_envoy_service_load_stats_v2_LoadStatsRequest, _envoy_service_load_stats_v2_LoadStatsResponse__Output>; + streamLoadStats(metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientDuplexStream<_envoy_service_load_stats_v3_LoadStatsRequest, _envoy_service_load_stats_v3_LoadStatsResponse__Output>; + streamLoadStats(options?: grpc.CallOptions): grpc.ClientDuplexStream<_envoy_service_load_stats_v3_LoadStatsRequest, _envoy_service_load_stats_v3_LoadStatsResponse__Output>; } @@ -103,6 +104,10 @@ export interface LoadReportingServiceHandlers extends grpc.UntypedServiceImpleme * from around the world, computes global assignment and prepares traffic * assignment destined for each zone Envoys are located in. Goto 2. */ - StreamLoadStats: grpc.handleBidiStreamingCall<_envoy_service_load_stats_v2_LoadStatsRequest__Output, _envoy_service_load_stats_v2_LoadStatsResponse>; + StreamLoadStats: grpc.handleBidiStreamingCall<_envoy_service_load_stats_v3_LoadStatsRequest__Output, _envoy_service_load_stats_v3_LoadStatsResponse>; } + +export interface LoadReportingServiceDefinition extends grpc.ServiceDefinition { + StreamLoadStats: MethodDefinition<_envoy_service_load_stats_v3_LoadStatsRequest, _envoy_service_load_stats_v3_LoadStatsResponse, _envoy_service_load_stats_v3_LoadStatsRequest__Output, _envoy_service_load_stats_v3_LoadStatsResponse__Output> +} diff --git a/packages/grpc-js-xds/src/generated/envoy/service/load_stats/v3/LoadStatsRequest.ts b/packages/grpc-js-xds/src/generated/envoy/service/load_stats/v3/LoadStatsRequest.ts new file mode 100644 index 000000000..e430eb270 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/service/load_stats/v3/LoadStatsRequest.ts @@ -0,0 +1,32 @@ +// Original file: deps/envoy-api/envoy/service/load_stats/v3/lrs.proto + +import type { Node as _envoy_config_core_v3_Node, Node__Output as _envoy_config_core_v3_Node__Output } from '../../../../envoy/config/core/v3/Node'; +import type { ClusterStats as _envoy_config_endpoint_v3_ClusterStats, ClusterStats__Output as _envoy_config_endpoint_v3_ClusterStats__Output } from '../../../../envoy/config/endpoint/v3/ClusterStats'; + +/** + * A load report Envoy sends to the management server. + */ +export interface LoadStatsRequest { + /** + * Node identifier for Envoy instance. + */ + 'node'?: (_envoy_config_core_v3_Node | null); + /** + * A list of load stats to report. + */ + 'cluster_stats'?: (_envoy_config_endpoint_v3_ClusterStats)[]; +} + +/** + * A load report Envoy sends to the management server. + */ +export interface LoadStatsRequest__Output { + /** + * Node identifier for Envoy instance. + */ + 'node': (_envoy_config_core_v3_Node__Output | null); + /** + * A list of load stats to report. 
+ */ + 'cluster_stats': (_envoy_config_endpoint_v3_ClusterStats__Output)[]; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/service/load_stats/v2/LoadStatsResponse.ts b/packages/grpc-js-xds/src/generated/envoy/service/load_stats/v3/LoadStatsResponse.ts similarity index 58% rename from packages/grpc-js-xds/src/generated/envoy/service/load_stats/v2/LoadStatsResponse.ts rename to packages/grpc-js-xds/src/generated/envoy/service/load_stats/v3/LoadStatsResponse.ts index afec8f180..6429d18c8 100644 --- a/packages/grpc-js-xds/src/generated/envoy/service/load_stats/v2/LoadStatsResponse.ts +++ b/packages/grpc-js-xds/src/generated/envoy/service/load_stats/v3/LoadStatsResponse.ts @@ -1,37 +1,37 @@ -// Original file: deps/envoy-api/envoy/service/load_stats/v2/lrs.proto +// Original file: deps/envoy-api/envoy/service/load_stats/v3/lrs.proto import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../../google/protobuf/Duration'; /** * The management server sends envoy a LoadStatsResponse with all clusters it * is interested in learning load stats about. - * [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. */ export interface LoadStatsResponse { /** * Clusters to report stats for. - * Not populated if *send_all_clusters* is true. + * Not populated if ``send_all_clusters`` is true. */ 'clusters'?: (string)[]; /** * The minimum interval of time to collect stats over. This is only a minimum for two reasons: + * * 1. There may be some delay from when the timer fires until stats sampling occurs. - * 2. For clusters that were already feature in the previous *LoadStatsResponse*, any traffic - * that is observed in between the corresponding previous *LoadStatsRequest* and this - * *LoadStatsResponse* will also be accumulated and billed to the cluster. This avoids a period + * 2. For clusters that were already feature in the previous ``LoadStatsResponse``, any traffic + * that is observed in between the corresponding previous ``LoadStatsRequest`` and this + * ``LoadStatsResponse`` will also be accumulated and billed to the cluster. This avoids a period * of inobservability that might otherwise exists between the messages. New clusters are not * subject to this consideration. */ - 'load_reporting_interval'?: (_google_protobuf_Duration); + 'load_reporting_interval'?: (_google_protobuf_Duration | null); /** - * Set to *true* if the management server supports endpoint granularity + * Set to ``true`` if the management server supports endpoint granularity * report. */ 'report_endpoint_granularity'?: (boolean); /** * If true, the client should send all clusters it knows about. * Only clients that advertise the "envoy.lrs.supports_send_all_clusters" capability in their - * :ref:`client_features` field will honor this field. + * :ref:`client_features` field will honor this field. */ 'send_all_clusters'?: (boolean); } @@ -39,33 +39,33 @@ export interface LoadStatsResponse { /** * The management server sends envoy a LoadStatsResponse with all clusters it * is interested in learning load stats about. - * [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs. */ export interface LoadStatsResponse__Output { /** * Clusters to report stats for. - * Not populated if *send_all_clusters* is true. + * Not populated if ``send_all_clusters`` is true. */ 'clusters': (string)[]; /** * The minimum interval of time to collect stats over. This is only a minimum for two reasons: + * * 1. 
There may be some delay from when the timer fires until stats sampling occurs. - * 2. For clusters that were already feature in the previous *LoadStatsResponse*, any traffic - * that is observed in between the corresponding previous *LoadStatsRequest* and this - * *LoadStatsResponse* will also be accumulated and billed to the cluster. This avoids a period + * 2. For clusters that were already feature in the previous ``LoadStatsResponse``, any traffic + * that is observed in between the corresponding previous ``LoadStatsRequest`` and this + * ``LoadStatsResponse`` will also be accumulated and billed to the cluster. This avoids a period * of inobservability that might otherwise exists between the messages. New clusters are not * subject to this consideration. */ - 'load_reporting_interval'?: (_google_protobuf_Duration__Output); + 'load_reporting_interval': (_google_protobuf_Duration__Output | null); /** - * Set to *true* if the management server supports endpoint granularity + * Set to ``true`` if the management server supports endpoint granularity * report. */ 'report_endpoint_granularity': (boolean); /** * If true, the client should send all clusters it knows about. * Only clients that advertise the "envoy.lrs.supports_send_all_clusters" capability in their - * :ref:`client_features` field will honor this field. + * :ref:`client_features` field will honor this field. */ 'send_all_clusters': (boolean); } diff --git a/packages/grpc-js-xds/src/generated/envoy/service/status/v3/ClientConfig.ts b/packages/grpc-js-xds/src/generated/envoy/service/status/v3/ClientConfig.ts new file mode 100644 index 000000000..506547d1e --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/service/status/v3/ClientConfig.ts @@ -0,0 +1,161 @@ +// Original file: deps/envoy-api/envoy/service/status/v3/csds.proto + +import type { Node as _envoy_config_core_v3_Node, Node__Output as _envoy_config_core_v3_Node__Output } from '../../../../envoy/config/core/v3/Node'; +import type { PerXdsConfig as _envoy_service_status_v3_PerXdsConfig, PerXdsConfig__Output as _envoy_service_status_v3_PerXdsConfig__Output } from '../../../../envoy/service/status/v3/PerXdsConfig'; +import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../../google/protobuf/Any'; +import type { Timestamp as _google_protobuf_Timestamp, Timestamp__Output as _google_protobuf_Timestamp__Output } from '../../../../google/protobuf/Timestamp'; +import type { ConfigStatus as _envoy_service_status_v3_ConfigStatus, ConfigStatus__Output as _envoy_service_status_v3_ConfigStatus__Output } from '../../../../envoy/service/status/v3/ConfigStatus'; +import type { ClientResourceStatus as _envoy_admin_v3_ClientResourceStatus, ClientResourceStatus__Output as _envoy_admin_v3_ClientResourceStatus__Output } from '../../../../envoy/admin/v3/ClientResourceStatus'; +import type { UpdateFailureState as _envoy_admin_v3_UpdateFailureState, UpdateFailureState__Output as _envoy_admin_v3_UpdateFailureState__Output } from '../../../../envoy/admin/v3/UpdateFailureState'; + +/** + * GenericXdsConfig is used to specify the config status and the dump + * of any xDS resource identified by their type URL. It is the generalized + * version of the now deprecated ListenersConfigDump, ClustersConfigDump etc + * [#next-free-field: 10] + */ +export interface _envoy_service_status_v3_ClientConfig_GenericXdsConfig { + /** + * Type_url represents the fully qualified name of xDS resource type + * like envoy.v3.Cluster, envoy.v3.ClusterLoadAssignment etc. 
+ */ + 'type_url'?: (string); + /** + * Name of the xDS resource + */ + 'name'?: (string); + /** + * This is the :ref:`version_info ` + * in the last processed xDS discovery response. If there are only + * static bootstrap listeners, this field will be "" + */ + 'version_info'?: (string); + /** + * The xDS resource config. Actual content depends on the type + */ + 'xds_config'?: (_google_protobuf_Any | null); + /** + * Timestamp when the xDS resource was last updated + */ + 'last_updated'?: (_google_protobuf_Timestamp | null); + /** + * Per xDS resource config status. It is generated by management servers. + * It will not be present if the CSDS server is an xDS client. + */ + 'config_status'?: (_envoy_service_status_v3_ConfigStatus); + /** + * Per xDS resource status from the view of a xDS client + */ + 'client_status'?: (_envoy_admin_v3_ClientResourceStatus); + /** + * Set if the last update failed, cleared after the next successful + * update. The *error_state* field contains the rejected version of + * this particular resource along with the reason and timestamp. For + * successfully updated or acknowledged resource, this field should + * be empty. + * [#not-implemented-hide:] + */ + 'error_state'?: (_envoy_admin_v3_UpdateFailureState | null); + /** + * Is static resource is true if it is specified in the config supplied + * through the file at the startup. + */ + 'is_static_resource'?: (boolean); +} + +/** + * GenericXdsConfig is used to specify the config status and the dump + * of any xDS resource identified by their type URL. It is the generalized + * version of the now deprecated ListenersConfigDump, ClustersConfigDump etc + * [#next-free-field: 10] + */ +export interface _envoy_service_status_v3_ClientConfig_GenericXdsConfig__Output { + /** + * Type_url represents the fully qualified name of xDS resource type + * like envoy.v3.Cluster, envoy.v3.ClusterLoadAssignment etc. + */ + 'type_url': (string); + /** + * Name of the xDS resource + */ + 'name': (string); + /** + * This is the :ref:`version_info ` + * in the last processed xDS discovery response. If there are only + * static bootstrap listeners, this field will be "" + */ + 'version_info': (string); + /** + * The xDS resource config. Actual content depends on the type + */ + 'xds_config': (_google_protobuf_Any__Output | null); + /** + * Timestamp when the xDS resource was last updated + */ + 'last_updated': (_google_protobuf_Timestamp__Output | null); + /** + * Per xDS resource config status. It is generated by management servers. + * It will not be present if the CSDS server is an xDS client. + */ + 'config_status': (_envoy_service_status_v3_ConfigStatus__Output); + /** + * Per xDS resource status from the view of a xDS client + */ + 'client_status': (_envoy_admin_v3_ClientResourceStatus__Output); + /** + * Set if the last update failed, cleared after the next successful + * update. The *error_state* field contains the rejected version of + * this particular resource along with the reason and timestamp. For + * successfully updated or acknowledged resource, this field should + * be empty. + * [#not-implemented-hide:] + */ + 'error_state': (_envoy_admin_v3_UpdateFailureState__Output | null); + /** + * Is static resource is true if it is specified in the config supplied + * through the file at the startup. + */ + 'is_static_resource': (boolean); +} + +/** + * All xds configs for a particular client. + */ +export interface ClientConfig { + /** + * Node for a particular client. 
+ */ + 'node'?: (_envoy_config_core_v3_Node | null); + /** + * This field is deprecated in favor of generic_xds_configs which is + * much simpler and uniform in structure. + * @deprecated + */ + 'xds_config'?: (_envoy_service_status_v3_PerXdsConfig)[]; + /** + * Represents generic xDS config and the exact config structure depends on + * the type URL (like Cluster if it is CDS) + */ + 'generic_xds_configs'?: (_envoy_service_status_v3_ClientConfig_GenericXdsConfig)[]; +} + +/** + * All xds configs for a particular client. + */ +export interface ClientConfig__Output { + /** + * Node for a particular client. + */ + 'node': (_envoy_config_core_v3_Node__Output | null); + /** + * This field is deprecated in favor of generic_xds_configs which is + * much simpler and uniform in structure. + * @deprecated + */ + 'xds_config': (_envoy_service_status_v3_PerXdsConfig__Output)[]; + /** + * Represents generic xDS config and the exact config structure depends on + * the type URL (like Cluster if it is CDS) + */ + 'generic_xds_configs': (_envoy_service_status_v3_ClientConfig_GenericXdsConfig__Output)[]; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/service/status/v3/ClientConfigStatus.ts b/packages/grpc-js-xds/src/generated/envoy/service/status/v3/ClientConfigStatus.ts new file mode 100644 index 000000000..be7a7afd0 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/service/status/v3/ClientConfigStatus.ts @@ -0,0 +1,59 @@ +// Original file: deps/envoy-api/envoy/service/status/v3/csds.proto + +/** + * Config status from a client-side view. + */ +export const ClientConfigStatus = { + /** + * Config status is not available/unknown. + */ + CLIENT_UNKNOWN: 'CLIENT_UNKNOWN', + /** + * Client requested the config but hasn't received any config from management + * server yet. + */ + CLIENT_REQUESTED: 'CLIENT_REQUESTED', + /** + * Client received the config and replied with ACK. + */ + CLIENT_ACKED: 'CLIENT_ACKED', + /** + * Client received the config and replied with NACK. Notably, the attached + * config dump is not the NACKed version, but the most recent accepted one. If + * no config is accepted yet, the attached config dump will be empty. + */ + CLIENT_NACKED: 'CLIENT_NACKED', +} as const; + +/** + * Config status from a client-side view. + */ +export type ClientConfigStatus = + /** + * Config status is not available/unknown. + */ + | 'CLIENT_UNKNOWN' + | 0 + /** + * Client requested the config but hasn't received any config from management + * server yet. + */ + | 'CLIENT_REQUESTED' + | 1 + /** + * Client received the config and replied with ACK. + */ + | 'CLIENT_ACKED' + | 2 + /** + * Client received the config and replied with NACK. Notably, the attached + * config dump is not the NACKed version, but the most recent accepted one. If + * no config is accepted yet, the attached config dump will be empty. + */ + | 'CLIENT_NACKED' + | 3 + +/** + * Config status from a client-side view. 
+ */ +export type ClientConfigStatus__Output = typeof ClientConfigStatus[keyof typeof ClientConfigStatus] diff --git a/packages/grpc-js-xds/src/generated/envoy/service/status/v3/ClientStatusDiscoveryService.ts b/packages/grpc-js-xds/src/generated/envoy/service/status/v3/ClientStatusDiscoveryService.ts new file mode 100644 index 000000000..7402fb69b --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/service/status/v3/ClientStatusDiscoveryService.ts @@ -0,0 +1,45 @@ +// Original file: deps/envoy-api/envoy/service/status/v3/csds.proto + +import type * as grpc from '@grpc/grpc-js' +import type { MethodDefinition } from '@grpc/proto-loader' +import type { ClientStatusRequest as _envoy_service_status_v3_ClientStatusRequest, ClientStatusRequest__Output as _envoy_service_status_v3_ClientStatusRequest__Output } from '../../../../envoy/service/status/v3/ClientStatusRequest'; +import type { ClientStatusResponse as _envoy_service_status_v3_ClientStatusResponse, ClientStatusResponse__Output as _envoy_service_status_v3_ClientStatusResponse__Output } from '../../../../envoy/service/status/v3/ClientStatusResponse'; + +/** + * CSDS is Client Status Discovery Service. It can be used to get the status of + * an xDS-compliant client from the management server's point of view. It can + * also be used to get the current xDS states directly from the client. + */ +export interface ClientStatusDiscoveryServiceClient extends grpc.Client { + FetchClientStatus(argument: _envoy_service_status_v3_ClientStatusRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_envoy_service_status_v3_ClientStatusResponse__Output>): grpc.ClientUnaryCall; + FetchClientStatus(argument: _envoy_service_status_v3_ClientStatusRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_envoy_service_status_v3_ClientStatusResponse__Output>): grpc.ClientUnaryCall; + FetchClientStatus(argument: _envoy_service_status_v3_ClientStatusRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_envoy_service_status_v3_ClientStatusResponse__Output>): grpc.ClientUnaryCall; + FetchClientStatus(argument: _envoy_service_status_v3_ClientStatusRequest, callback: grpc.requestCallback<_envoy_service_status_v3_ClientStatusResponse__Output>): grpc.ClientUnaryCall; + fetchClientStatus(argument: _envoy_service_status_v3_ClientStatusRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_envoy_service_status_v3_ClientStatusResponse__Output>): grpc.ClientUnaryCall; + fetchClientStatus(argument: _envoy_service_status_v3_ClientStatusRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_envoy_service_status_v3_ClientStatusResponse__Output>): grpc.ClientUnaryCall; + fetchClientStatus(argument: _envoy_service_status_v3_ClientStatusRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_envoy_service_status_v3_ClientStatusResponse__Output>): grpc.ClientUnaryCall; + fetchClientStatus(argument: _envoy_service_status_v3_ClientStatusRequest, callback: grpc.requestCallback<_envoy_service_status_v3_ClientStatusResponse__Output>): grpc.ClientUnaryCall; + + StreamClientStatus(metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientDuplexStream<_envoy_service_status_v3_ClientStatusRequest, _envoy_service_status_v3_ClientStatusResponse__Output>; + StreamClientStatus(options?: grpc.CallOptions): grpc.ClientDuplexStream<_envoy_service_status_v3_ClientStatusRequest, _envoy_service_status_v3_ClientStatusResponse__Output>; + streamClientStatus(metadata: 
grpc.Metadata, options?: grpc.CallOptions): grpc.ClientDuplexStream<_envoy_service_status_v3_ClientStatusRequest, _envoy_service_status_v3_ClientStatusResponse__Output>; + streamClientStatus(options?: grpc.CallOptions): grpc.ClientDuplexStream<_envoy_service_status_v3_ClientStatusRequest, _envoy_service_status_v3_ClientStatusResponse__Output>; + +} + +/** + * CSDS is Client Status Discovery Service. It can be used to get the status of + * an xDS-compliant client from the management server's point of view. It can + * also be used to get the current xDS states directly from the client. + */ +export interface ClientStatusDiscoveryServiceHandlers extends grpc.UntypedServiceImplementation { + FetchClientStatus: grpc.handleUnaryCall<_envoy_service_status_v3_ClientStatusRequest__Output, _envoy_service_status_v3_ClientStatusResponse>; + + StreamClientStatus: grpc.handleBidiStreamingCall<_envoy_service_status_v3_ClientStatusRequest__Output, _envoy_service_status_v3_ClientStatusResponse>; + +} + +export interface ClientStatusDiscoveryServiceDefinition extends grpc.ServiceDefinition { + FetchClientStatus: MethodDefinition<_envoy_service_status_v3_ClientStatusRequest, _envoy_service_status_v3_ClientStatusResponse, _envoy_service_status_v3_ClientStatusRequest__Output, _envoy_service_status_v3_ClientStatusResponse__Output> + StreamClientStatus: MethodDefinition<_envoy_service_status_v3_ClientStatusRequest, _envoy_service_status_v3_ClientStatusResponse, _envoy_service_status_v3_ClientStatusRequest__Output, _envoy_service_status_v3_ClientStatusResponse__Output> +} diff --git a/packages/grpc-js-xds/src/generated/envoy/service/status/v3/ClientStatusRequest.ts b/packages/grpc-js-xds/src/generated/envoy/service/status/v3/ClientStatusRequest.ts new file mode 100644 index 000000000..91adddacc --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/service/status/v3/ClientStatusRequest.ts @@ -0,0 +1,34 @@ +// Original file: deps/envoy-api/envoy/service/status/v3/csds.proto + +import type { NodeMatcher as _envoy_type_matcher_v3_NodeMatcher, NodeMatcher__Output as _envoy_type_matcher_v3_NodeMatcher__Output } from '../../../../envoy/type/matcher/v3/NodeMatcher'; +import type { Node as _envoy_config_core_v3_Node, Node__Output as _envoy_config_core_v3_Node__Output } from '../../../../envoy/config/core/v3/Node'; + +/** + * Request for client status of clients identified by a list of NodeMatchers. + */ +export interface ClientStatusRequest { + /** + * Management server can use these match criteria to identify clients. + * The match follows OR semantics. + */ + 'node_matchers'?: (_envoy_type_matcher_v3_NodeMatcher)[]; + /** + * The node making the csds request. + */ + 'node'?: (_envoy_config_core_v3_Node | null); +} + +/** + * Request for client status of clients identified by a list of NodeMatchers. + */ +export interface ClientStatusRequest__Output { + /** + * Management server can use these match criteria to identify clients. + * The match follows OR semantics. + */ + 'node_matchers': (_envoy_type_matcher_v3_NodeMatcher__Output)[]; + /** + * The node making the csds request. 
+ */ + 'node': (_envoy_config_core_v3_Node__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/service/status/v3/ClientStatusResponse.ts b/packages/grpc-js-xds/src/generated/envoy/service/status/v3/ClientStatusResponse.ts new file mode 100644 index 000000000..3611016ec --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/service/status/v3/ClientStatusResponse.ts @@ -0,0 +1,17 @@ +// Original file: deps/envoy-api/envoy/service/status/v3/csds.proto + +import type { ClientConfig as _envoy_service_status_v3_ClientConfig, ClientConfig__Output as _envoy_service_status_v3_ClientConfig__Output } from '../../../../envoy/service/status/v3/ClientConfig'; + +export interface ClientStatusResponse { + /** + * Client configs for the clients specified in the ClientStatusRequest. + */ + 'config'?: (_envoy_service_status_v3_ClientConfig)[]; +} + +export interface ClientStatusResponse__Output { + /** + * Client configs for the clients specified in the ClientStatusRequest. + */ + 'config': (_envoy_service_status_v3_ClientConfig__Output)[]; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/service/status/v3/ConfigStatus.ts b/packages/grpc-js-xds/src/generated/envoy/service/status/v3/ConfigStatus.ts new file mode 100644 index 000000000..15a8359e8 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/service/status/v3/ConfigStatus.ts @@ -0,0 +1,68 @@ +// Original file: deps/envoy-api/envoy/service/status/v3/csds.proto + +/** + * Status of a config from a management server view. + */ +export const ConfigStatus = { + /** + * Status info is not available/unknown. + */ + UNKNOWN: 'UNKNOWN', + /** + * Management server has sent the config to client and received ACK. + */ + SYNCED: 'SYNCED', + /** + * Config is not sent. + */ + NOT_SENT: 'NOT_SENT', + /** + * Management server has sent the config to client but hasn’t received + * ACK/NACK. + */ + STALE: 'STALE', + /** + * Management server has sent the config to client but received NACK. The + * attached config dump will be the latest config (the rejected one), since + * it is the persisted version in the management server. + */ + ERROR: 'ERROR', +} as const; + +/** + * Status of a config from a management server view. + */ +export type ConfigStatus = + /** + * Status info is not available/unknown. + */ + | 'UNKNOWN' + | 0 + /** + * Management server has sent the config to client and received ACK. + */ + | 'SYNCED' + | 1 + /** + * Config is not sent. + */ + | 'NOT_SENT' + | 2 + /** + * Management server has sent the config to client but hasn’t received + * ACK/NACK. + */ + | 'STALE' + | 3 + /** + * Management server has sent the config to client but received NACK. The + * attached config dump will be the latest config (the rejected one), since + * it is the persisted version in the management server. + */ + | 'ERROR' + | 4 + +/** + * Status of a config from a management server view. 
+ */ +export type ConfigStatus__Output = typeof ConfigStatus[keyof typeof ConfigStatus] diff --git a/packages/grpc-js-xds/src/generated/envoy/service/status/v3/PerXdsConfig.ts b/packages/grpc-js-xds/src/generated/envoy/service/status/v3/PerXdsConfig.ts new file mode 100644 index 000000000..d921f3b1c --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/service/status/v3/PerXdsConfig.ts @@ -0,0 +1,69 @@ +// Original file: deps/envoy-api/envoy/service/status/v3/csds.proto + +import type { ConfigStatus as _envoy_service_status_v3_ConfigStatus, ConfigStatus__Output as _envoy_service_status_v3_ConfigStatus__Output } from '../../../../envoy/service/status/v3/ConfigStatus'; +import type { ListenersConfigDump as _envoy_admin_v3_ListenersConfigDump, ListenersConfigDump__Output as _envoy_admin_v3_ListenersConfigDump__Output } from '../../../../envoy/admin/v3/ListenersConfigDump'; +import type { ClustersConfigDump as _envoy_admin_v3_ClustersConfigDump, ClustersConfigDump__Output as _envoy_admin_v3_ClustersConfigDump__Output } from '../../../../envoy/admin/v3/ClustersConfigDump'; +import type { RoutesConfigDump as _envoy_admin_v3_RoutesConfigDump, RoutesConfigDump__Output as _envoy_admin_v3_RoutesConfigDump__Output } from '../../../../envoy/admin/v3/RoutesConfigDump'; +import type { ScopedRoutesConfigDump as _envoy_admin_v3_ScopedRoutesConfigDump, ScopedRoutesConfigDump__Output as _envoy_admin_v3_ScopedRoutesConfigDump__Output } from '../../../../envoy/admin/v3/ScopedRoutesConfigDump'; +import type { EndpointsConfigDump as _envoy_admin_v3_EndpointsConfigDump, EndpointsConfigDump__Output as _envoy_admin_v3_EndpointsConfigDump__Output } from '../../../../envoy/admin/v3/EndpointsConfigDump'; +import type { ClientConfigStatus as _envoy_service_status_v3_ClientConfigStatus, ClientConfigStatus__Output as _envoy_service_status_v3_ClientConfigStatus__Output } from '../../../../envoy/service/status/v3/ClientConfigStatus'; + +/** + * Detailed config (per xDS) with status. + * [#next-free-field: 8] + */ +export interface PerXdsConfig { + /** + * Config status generated by management servers. Will not be present if the + * CSDS server is an xDS client. + */ + 'status'?: (_envoy_service_status_v3_ConfigStatus); + 'listener_config'?: (_envoy_admin_v3_ListenersConfigDump | null); + 'cluster_config'?: (_envoy_admin_v3_ClustersConfigDump | null); + 'route_config'?: (_envoy_admin_v3_RoutesConfigDump | null); + 'scoped_route_config'?: (_envoy_admin_v3_ScopedRoutesConfigDump | null); + 'endpoint_config'?: (_envoy_admin_v3_EndpointsConfigDump | null); + /** + * Client config status is populated by xDS clients. Will not be present if + * the CSDS server is an xDS server. No matter what the client config status + * is, xDS clients should always dump the most recent accepted xDS config. + * + * .. attention:: + * This field is deprecated. Use :ref:`ClientResourceStatus + * ` for per-resource + * config status instead. + * @deprecated + */ + 'client_status'?: (_envoy_service_status_v3_ClientConfigStatus); + 'per_xds_config'?: "listener_config"|"cluster_config"|"route_config"|"scoped_route_config"|"endpoint_config"; +} + +/** + * Detailed config (per xDS) with status. + * [#next-free-field: 8] + */ +export interface PerXdsConfig__Output { + /** + * Config status generated by management servers. Will not be present if the + * CSDS server is an xDS client. 
+ */ + 'status': (_envoy_service_status_v3_ConfigStatus__Output); + 'listener_config'?: (_envoy_admin_v3_ListenersConfigDump__Output | null); + 'cluster_config'?: (_envoy_admin_v3_ClustersConfigDump__Output | null); + 'route_config'?: (_envoy_admin_v3_RoutesConfigDump__Output | null); + 'scoped_route_config'?: (_envoy_admin_v3_ScopedRoutesConfigDump__Output | null); + 'endpoint_config'?: (_envoy_admin_v3_EndpointsConfigDump__Output | null); + /** + * Client config status is populated by xDS clients. Will not be present if + * the CSDS server is an xDS server. No matter what the client config status + * is, xDS clients should always dump the most recent accepted xDS config. + * + * .. attention:: + * This field is deprecated. Use :ref:`ClientResourceStatus + * ` for per-resource + * config status instead. + * @deprecated + */ + 'client_status': (_envoy_service_status_v3_ClientConfigStatus__Output); + 'per_xds_config': "listener_config"|"cluster_config"|"route_config"|"scoped_route_config"|"endpoint_config"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/type/CodecClientType.ts b/packages/grpc-js-xds/src/generated/envoy/type/CodecClientType.ts deleted file mode 100644 index a0bf07351..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/type/CodecClientType.ts +++ /dev/null @@ -1,12 +0,0 @@ -// Original file: deps/envoy-api/envoy/type/http.proto - -export enum CodecClientType { - HTTP1 = 0, - HTTP2 = 1, - /** - * [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with - * caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient - * to distinguish HTTP1 and HTTP2 traffic. - */ - HTTP3 = 2, -} diff --git a/packages/grpc-js-xds/src/generated/envoy/type/http/v3/PathTransformation.ts b/packages/grpc-js-xds/src/generated/envoy/type/http/v3/PathTransformation.ts new file mode 100644 index 000000000..c8aca7e3f --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/type/http/v3/PathTransformation.ts @@ -0,0 +1,92 @@ +// Original file: deps/envoy-api/envoy/type/http/v3/path_transformation.proto + + +/** + * Determines if adjacent slashes are merged into one. A common use case is for a request path + * header. Using this option in ``:ref: PathNormalizationOptions + * `` + * will allow incoming requests with path ``//dir///file`` to match against route with ``prefix`` + * match set to ``/dir``. When using for header transformations, note that slash merging is not + * part of `HTTP spec `_ and is provided for convenience. + */ +export interface _envoy_type_http_v3_PathTransformation_Operation_MergeSlashes { +} + +/** + * Determines if adjacent slashes are merged into one. A common use case is for a request path + * header. Using this option in ``:ref: PathNormalizationOptions + * `` + * will allow incoming requests with path ``//dir///file`` to match against route with ``prefix`` + * match set to ``/dir``. When using for header transformations, note that slash merging is not + * part of `HTTP spec `_ and is provided for convenience. + */ +export interface _envoy_type_http_v3_PathTransformation_Operation_MergeSlashes__Output { +} + +/** + * Should text be normalized according to RFC 3986? This typically is used for path headers + * before any processing of requests by HTTP filters or routing. This applies percent-encoded + * normalization and path segment normalization. Fails on characters disallowed in URLs + * (e.g. NULLs). See `Normalization and Comparison + * `_ for details of normalization. 
Note that + * this options does not perform `case normalization + * `_ + */ +export interface _envoy_type_http_v3_PathTransformation_Operation_NormalizePathRFC3986 { +} + +/** + * Should text be normalized according to RFC 3986? This typically is used for path headers + * before any processing of requests by HTTP filters or routing. This applies percent-encoded + * normalization and path segment normalization. Fails on characters disallowed in URLs + * (e.g. NULLs). See `Normalization and Comparison + * `_ for details of normalization. Note that + * this options does not perform `case normalization + * `_ + */ +export interface _envoy_type_http_v3_PathTransformation_Operation_NormalizePathRFC3986__Output { +} + +/** + * A type of operation to alter text. + */ +export interface _envoy_type_http_v3_PathTransformation_Operation { + /** + * Enable path normalization per RFC 3986. + */ + 'normalize_path_rfc_3986'?: (_envoy_type_http_v3_PathTransformation_Operation_NormalizePathRFC3986 | null); + /** + * Enable merging adjacent slashes. + */ + 'merge_slashes'?: (_envoy_type_http_v3_PathTransformation_Operation_MergeSlashes | null); + 'operation_specifier'?: "normalize_path_rfc_3986"|"merge_slashes"; +} + +/** + * A type of operation to alter text. + */ +export interface _envoy_type_http_v3_PathTransformation_Operation__Output { + /** + * Enable path normalization per RFC 3986. + */ + 'normalize_path_rfc_3986'?: (_envoy_type_http_v3_PathTransformation_Operation_NormalizePathRFC3986__Output | null); + /** + * Enable merging adjacent slashes. + */ + 'merge_slashes'?: (_envoy_type_http_v3_PathTransformation_Operation_MergeSlashes__Output | null); + 'operation_specifier': "normalize_path_rfc_3986"|"merge_slashes"; +} + +export interface PathTransformation { + /** + * A list of operations to apply. Transformations will be performed in the order that they appear. + */ + 'operations'?: (_envoy_type_http_v3_PathTransformation_Operation)[]; +} + +export interface PathTransformation__Output { + /** + * A list of operations to apply. Transformations will be performed in the order that they appear. + */ + 'operations': (_envoy_type_http_v3_PathTransformation_Operation__Output)[]; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/type/matcher/ListStringMatcher.ts b/packages/grpc-js-xds/src/generated/envoy/type/matcher/ListStringMatcher.ts deleted file mode 100644 index 42bde031b..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/type/matcher/ListStringMatcher.ts +++ /dev/null @@ -1,17 +0,0 @@ -// Original file: deps/envoy-api/envoy/type/matcher/string.proto - -import type { StringMatcher as _envoy_type_matcher_StringMatcher, StringMatcher__Output as _envoy_type_matcher_StringMatcher__Output } from '../../../envoy/type/matcher/StringMatcher'; - -/** - * Specifies a list of ways to match a string. - */ -export interface ListStringMatcher { - 'patterns'?: (_envoy_type_matcher_StringMatcher)[]; -} - -/** - * Specifies a list of ways to match a string. 
- */ -export interface ListStringMatcher__Output { - 'patterns': (_envoy_type_matcher_StringMatcher__Output)[]; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/type/matcher/RegexMatcher.ts b/packages/grpc-js-xds/src/generated/envoy/type/matcher/RegexMatcher.ts deleted file mode 100644 index 8a18816b0..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/type/matcher/RegexMatcher.ts +++ /dev/null @@ -1,69 +0,0 @@ -// Original file: deps/envoy-api/envoy/type/matcher/regex.proto - -import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../google/protobuf/UInt32Value'; - -/** - * Google's `RE2 `_ regex engine. The regex string must adhere to - * the documented `syntax `_. The engine is designed - * to complete execution in linear time as well as limit the amount of memory used. - */ -export interface _envoy_type_matcher_RegexMatcher_GoogleRE2 { - /** - * This field controls the RE2 "program size" which is a rough estimate of how complex a - * compiled regex is to evaluate. A regex that has a program size greater than the configured - * value will fail to compile. In this case, the configured max program size can be increased - * or the regex can be simplified. If not specified, the default is 100. - * - * This field is deprecated; regexp validation should be performed on the management server - * instead of being done by each individual client. - */ - 'max_program_size'?: (_google_protobuf_UInt32Value); -} - -/** - * Google's `RE2 `_ regex engine. The regex string must adhere to - * the documented `syntax `_. The engine is designed - * to complete execution in linear time as well as limit the amount of memory used. - */ -export interface _envoy_type_matcher_RegexMatcher_GoogleRE2__Output { - /** - * This field controls the RE2 "program size" which is a rough estimate of how complex a - * compiled regex is to evaluate. A regex that has a program size greater than the configured - * value will fail to compile. In this case, the configured max program size can be increased - * or the regex can be simplified. If not specified, the default is 100. - * - * This field is deprecated; regexp validation should be performed on the management server - * instead of being done by each individual client. - */ - 'max_program_size'?: (_google_protobuf_UInt32Value__Output); -} - -/** - * A regex matcher designed for safety when used with untrusted input. - */ -export interface RegexMatcher { - /** - * Google's RE2 regex engine. - */ - 'google_re2'?: (_envoy_type_matcher_RegexMatcher_GoogleRE2); - /** - * The regex match string. The string must be supported by the configured engine. - */ - 'regex'?: (string); - 'engine_type'?: "google_re2"; -} - -/** - * A regex matcher designed for safety when used with untrusted input. - */ -export interface RegexMatcher__Output { - /** - * Google's RE2 regex engine. - */ - 'google_re2'?: (_envoy_type_matcher_RegexMatcher_GoogleRE2__Output); - /** - * The regex match string. The string must be supported by the configured engine. 
- */ - 'regex': (string); - 'engine_type': "google_re2"; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/type/matcher/v3/DoubleMatcher.ts b/packages/grpc-js-xds/src/generated/envoy/type/matcher/v3/DoubleMatcher.ts new file mode 100644 index 000000000..0bf3bca79 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/type/matcher/v3/DoubleMatcher.ts @@ -0,0 +1,35 @@ +// Original file: deps/envoy-api/envoy/type/matcher/v3/number.proto + +import type { DoubleRange as _envoy_type_v3_DoubleRange, DoubleRange__Output as _envoy_type_v3_DoubleRange__Output } from '../../../../envoy/type/v3/DoubleRange'; + +/** + * Specifies the way to match a double value. + */ +export interface DoubleMatcher { + /** + * If specified, the input double value must be in the range specified here. + * Note: The range is using half-open interval semantics [start, end). + */ + 'range'?: (_envoy_type_v3_DoubleRange | null); + /** + * If specified, the input double value must be equal to the value specified here. + */ + 'exact'?: (number | string); + 'match_pattern'?: "range"|"exact"; +} + +/** + * Specifies the way to match a double value. + */ +export interface DoubleMatcher__Output { + /** + * If specified, the input double value must be in the range specified here. + * Note: The range is using half-open interval semantics [start, end). + */ + 'range'?: (_envoy_type_v3_DoubleRange__Output | null); + /** + * If specified, the input double value must be equal to the value specified here. + */ + 'exact'?: (number); + 'match_pattern': "range"|"exact"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/type/matcher/v3/ListMatcher.ts b/packages/grpc-js-xds/src/generated/envoy/type/matcher/v3/ListMatcher.ts new file mode 100644 index 000000000..10bf5567c --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/type/matcher/v3/ListMatcher.ts @@ -0,0 +1,25 @@ +// Original file: deps/envoy-api/envoy/type/matcher/v3/value.proto + +import type { ValueMatcher as _envoy_type_matcher_v3_ValueMatcher, ValueMatcher__Output as _envoy_type_matcher_v3_ValueMatcher__Output } from '../../../../envoy/type/matcher/v3/ValueMatcher'; + +/** + * Specifies the way to match a list value. + */ +export interface ListMatcher { + /** + * If specified, at least one of the values in the list must match the value specified. + */ + 'one_of'?: (_envoy_type_matcher_v3_ValueMatcher | null); + 'match_pattern'?: "one_of"; +} + +/** + * Specifies the way to match a list value. + */ +export interface ListMatcher__Output { + /** + * If specified, at least one of the values in the list must match the value specified. + */ + 'one_of'?: (_envoy_type_matcher_v3_ValueMatcher__Output | null); + 'match_pattern': "one_of"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/type/matcher/v3/ListStringMatcher.ts b/packages/grpc-js-xds/src/generated/envoy/type/matcher/v3/ListStringMatcher.ts new file mode 100644 index 000000000..4d5062382 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/type/matcher/v3/ListStringMatcher.ts @@ -0,0 +1,17 @@ +// Original file: deps/envoy-api/envoy/type/matcher/v3/string.proto + +import type { StringMatcher as _envoy_type_matcher_v3_StringMatcher, StringMatcher__Output as _envoy_type_matcher_v3_StringMatcher__Output } from '../../../../envoy/type/matcher/v3/StringMatcher'; + +/** + * Specifies a list of ways to match a string. + */ +export interface ListStringMatcher { + 'patterns'?: (_envoy_type_matcher_v3_StringMatcher)[]; +} + +/** + * Specifies a list of ways to match a string. 
+ */ +export interface ListStringMatcher__Output { + 'patterns': (_envoy_type_matcher_v3_StringMatcher__Output)[]; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/type/matcher/v3/MetadataMatcher.ts b/packages/grpc-js-xds/src/generated/envoy/type/matcher/v3/MetadataMatcher.ts new file mode 100644 index 000000000..78d4f03da --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/type/matcher/v3/MetadataMatcher.ts @@ -0,0 +1,73 @@ +// Original file: deps/envoy-api/envoy/type/matcher/v3/metadata.proto + +import type { ValueMatcher as _envoy_type_matcher_v3_ValueMatcher, ValueMatcher__Output as _envoy_type_matcher_v3_ValueMatcher__Output } from '../../../../envoy/type/matcher/v3/ValueMatcher'; + +/** + * Specifies the segment in a path to retrieve value from Metadata. + * Note: Currently it's not supported to retrieve a value from a list in Metadata. This means that + * if the segment key refers to a list, it has to be the last segment in a path. + */ +export interface _envoy_type_matcher_v3_MetadataMatcher_PathSegment { + /** + * If specified, use the key to retrieve the value in a Struct. + */ + 'key'?: (string); + 'segment'?: "key"; +} + +/** + * Specifies the segment in a path to retrieve value from Metadata. + * Note: Currently it's not supported to retrieve a value from a list in Metadata. This means that + * if the segment key refers to a list, it has to be the last segment in a path. + */ +export interface _envoy_type_matcher_v3_MetadataMatcher_PathSegment__Output { + /** + * If specified, use the key to retrieve the value in a Struct. + */ + 'key'?: (string); + 'segment': "key"; +} + +/** + * [#next-major-version: MetadataMatcher should use StructMatcher] + */ +export interface MetadataMatcher { + /** + * The filter name to retrieve the Struct from the Metadata. + */ + 'filter'?: (string); + /** + * The path to retrieve the Value from the Struct. + */ + 'path'?: (_envoy_type_matcher_v3_MetadataMatcher_PathSegment)[]; + /** + * The MetadataMatcher is matched if the value retrieved by path is matched to this value. + */ + 'value'?: (_envoy_type_matcher_v3_ValueMatcher | null); + /** + * If true, the match result will be inverted. + */ + 'invert'?: (boolean); +} + +/** + * [#next-major-version: MetadataMatcher should use StructMatcher] + */ +export interface MetadataMatcher__Output { + /** + * The filter name to retrieve the Struct from the Metadata. + */ + 'filter': (string); + /** + * The path to retrieve the Value from the Struct. + */ + 'path': (_envoy_type_matcher_v3_MetadataMatcher_PathSegment__Output)[]; + /** + * The MetadataMatcher is matched if the value retrieved by path is matched to this value. + */ + 'value': (_envoy_type_matcher_v3_ValueMatcher__Output | null); + /** + * If true, the match result will be inverted. 
+ */ + 'invert': (boolean); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/type/matcher/v3/NodeMatcher.ts b/packages/grpc-js-xds/src/generated/envoy/type/matcher/v3/NodeMatcher.ts new file mode 100644 index 000000000..7e31b9753 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/type/matcher/v3/NodeMatcher.ts @@ -0,0 +1,34 @@ +// Original file: deps/envoy-api/envoy/type/matcher/v3/node.proto + +import type { StringMatcher as _envoy_type_matcher_v3_StringMatcher, StringMatcher__Output as _envoy_type_matcher_v3_StringMatcher__Output } from '../../../../envoy/type/matcher/v3/StringMatcher'; +import type { StructMatcher as _envoy_type_matcher_v3_StructMatcher, StructMatcher__Output as _envoy_type_matcher_v3_StructMatcher__Output } from '../../../../envoy/type/matcher/v3/StructMatcher'; + +/** + * Specifies the way to match a Node. + * The match follows AND semantics. + */ +export interface NodeMatcher { + /** + * Specifies match criteria on the node id. + */ + 'node_id'?: (_envoy_type_matcher_v3_StringMatcher | null); + /** + * Specifies match criteria on the node metadata. + */ + 'node_metadatas'?: (_envoy_type_matcher_v3_StructMatcher)[]; +} + +/** + * Specifies the way to match a Node. + * The match follows AND semantics. + */ +export interface NodeMatcher__Output { + /** + * Specifies match criteria on the node id. + */ + 'node_id': (_envoy_type_matcher_v3_StringMatcher__Output | null); + /** + * Specifies match criteria on the node metadata. + */ + 'node_metadatas': (_envoy_type_matcher_v3_StructMatcher__Output)[]; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/type/matcher/RegexMatchAndSubstitute.ts b/packages/grpc-js-xds/src/generated/envoy/type/matcher/v3/RegexMatchAndSubstitute.ts similarity index 88% rename from packages/grpc-js-xds/src/generated/envoy/type/matcher/RegexMatchAndSubstitute.ts rename to packages/grpc-js-xds/src/generated/envoy/type/matcher/v3/RegexMatchAndSubstitute.ts index 17c38c7c0..3796765a7 100644 --- a/packages/grpc-js-xds/src/generated/envoy/type/matcher/RegexMatchAndSubstitute.ts +++ b/packages/grpc-js-xds/src/generated/envoy/type/matcher/v3/RegexMatchAndSubstitute.ts @@ -1,6 +1,6 @@ -// Original file: deps/envoy-api/envoy/type/matcher/regex.proto +// Original file: deps/envoy-api/envoy/type/matcher/v3/regex.proto -import type { RegexMatcher as _envoy_type_matcher_RegexMatcher, RegexMatcher__Output as _envoy_type_matcher_RegexMatcher__Output } from '../../../envoy/type/matcher/RegexMatcher'; +import type { RegexMatcher as _envoy_type_matcher_v3_RegexMatcher, RegexMatcher__Output as _envoy_type_matcher_v3_RegexMatcher__Output } from '../../../../envoy/type/matcher/v3/RegexMatcher'; /** * Describes how to match a string and then produce a new string using a regular @@ -18,7 +18,7 @@ export interface RegexMatchAndSubstitute { * used in the pattern to extract portions of the subject string, and then * referenced in the substitution string. */ - 'pattern'?: (_envoy_type_matcher_RegexMatcher); + 'pattern'?: (_envoy_type_matcher_v3_RegexMatcher | null); /** * The string that should be substituted into matching portions of the * subject string during a substitution operation to produce a new string. @@ -49,7 +49,7 @@ export interface RegexMatchAndSubstitute__Output { * used in the pattern to extract portions of the subject string, and then * referenced in the substitution string. 
*/ - 'pattern'?: (_envoy_type_matcher_RegexMatcher__Output); + 'pattern': (_envoy_type_matcher_v3_RegexMatcher__Output | null); /** * The string that should be substituted into matching portions of the * subject string during a substitution operation to produce a new string. diff --git a/packages/grpc-js-xds/src/generated/envoy/type/matcher/v3/RegexMatcher.ts b/packages/grpc-js-xds/src/generated/envoy/type/matcher/v3/RegexMatcher.ts new file mode 100644 index 000000000..19517678f --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/type/matcher/v3/RegexMatcher.ts @@ -0,0 +1,105 @@ +// Original file: deps/envoy-api/envoy/type/matcher/v3/regex.proto + +import type { UInt32Value as _google_protobuf_UInt32Value, UInt32Value__Output as _google_protobuf_UInt32Value__Output } from '../../../../google/protobuf/UInt32Value'; + +/** + * Google's `RE2 `_ regex engine. The regex string must adhere to + * the documented `syntax `_. The engine is designed + * to complete execution in linear time as well as limit the amount of memory used. + * + * Envoy supports program size checking via runtime. The runtime keys ``re2.max_program_size.error_level`` + * and ``re2.max_program_size.warn_level`` can be set to integers as the maximum program size or + * complexity that a compiled regex can have before an exception is thrown or a warning is + * logged, respectively. ``re2.max_program_size.error_level`` defaults to 100, and + * ``re2.max_program_size.warn_level`` has no default if unset (will not check/log a warning). + * + * Envoy emits two stats for tracking the program size of regexes: the histogram ``re2.program_size``, + * which records the program size, and the counter ``re2.exceeded_warn_level``, which is incremented + * each time the program size exceeds the warn level threshold. + */ +export interface _envoy_type_matcher_v3_RegexMatcher_GoogleRE2 { + /** + * This field controls the RE2 "program size" which is a rough estimate of how complex a + * compiled regex is to evaluate. A regex that has a program size greater than the configured + * value will fail to compile. In this case, the configured max program size can be increased + * or the regex can be simplified. If not specified, the default is 100. + * + * This field is deprecated; regexp validation should be performed on the management server + * instead of being done by each individual client. + * + * .. note:: + * + * Although this field is deprecated, the program size will still be checked against the + * global ``re2.max_program_size.error_level`` runtime value. + * @deprecated + */ + 'max_program_size'?: (_google_protobuf_UInt32Value | null); +} + +/** + * Google's `RE2 `_ regex engine. The regex string must adhere to + * the documented `syntax `_. The engine is designed + * to complete execution in linear time as well as limit the amount of memory used. + * + * Envoy supports program size checking via runtime. The runtime keys ``re2.max_program_size.error_level`` + * and ``re2.max_program_size.warn_level`` can be set to integers as the maximum program size or + * complexity that a compiled regex can have before an exception is thrown or a warning is + * logged, respectively. ``re2.max_program_size.error_level`` defaults to 100, and + * ``re2.max_program_size.warn_level`` has no default if unset (will not check/log a warning). 
+ * + * Envoy emits two stats for tracking the program size of regexes: the histogram ``re2.program_size``, + * which records the program size, and the counter ``re2.exceeded_warn_level``, which is incremented + * each time the program size exceeds the warn level threshold. + */ +export interface _envoy_type_matcher_v3_RegexMatcher_GoogleRE2__Output { + /** + * This field controls the RE2 "program size" which is a rough estimate of how complex a + * compiled regex is to evaluate. A regex that has a program size greater than the configured + * value will fail to compile. In this case, the configured max program size can be increased + * or the regex can be simplified. If not specified, the default is 100. + * + * This field is deprecated; regexp validation should be performed on the management server + * instead of being done by each individual client. + * + * .. note:: + * + * Although this field is deprecated, the program size will still be checked against the + * global ``re2.max_program_size.error_level`` runtime value. + * @deprecated + */ + 'max_program_size': (_google_protobuf_UInt32Value__Output | null); +} + +/** + * A regex matcher designed for safety when used with untrusted input. + */ +export interface RegexMatcher { + /** + * Google's RE2 regex engine. + * @deprecated + */ + 'google_re2'?: (_envoy_type_matcher_v3_RegexMatcher_GoogleRE2 | null); + /** + * The regex match string. The string must be supported by the configured engine. The regex is matched + * against the full string, not as a partial match. + */ + 'regex'?: (string); + 'engine_type'?: "google_re2"; +} + +/** + * A regex matcher designed for safety when used with untrusted input. + */ +export interface RegexMatcher__Output { + /** + * Google's RE2 regex engine. + * @deprecated + */ + 'google_re2'?: (_envoy_type_matcher_v3_RegexMatcher_GoogleRE2__Output | null); + /** + * The regex match string. The string must be supported by the configured engine. The regex is matched + * against the full string, not as a partial match. + */ + 'regex': (string); + 'engine_type': "google_re2"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/type/matcher/v3/StringMatcher.ts b/packages/grpc-js-xds/src/generated/envoy/type/matcher/v3/StringMatcher.ts new file mode 100644 index 000000000..181d59d54 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/type/matcher/v3/StringMatcher.ts @@ -0,0 +1,109 @@ +// Original file: deps/envoy-api/envoy/type/matcher/v3/string.proto + +import type { RegexMatcher as _envoy_type_matcher_v3_RegexMatcher, RegexMatcher__Output as _envoy_type_matcher_v3_RegexMatcher__Output } from '../../../../envoy/type/matcher/v3/RegexMatcher'; + +/** + * Specifies the way to match a string. + * [#next-free-field: 8] + */ +export interface StringMatcher { + /** + * The input string must match exactly the string specified here. + * + * Examples: + * + * * ``abc`` only matches the value ``abc``. + */ + 'exact'?: (string); + /** + * The input string must have the prefix specified here. + * Note: empty prefix is not allowed, please use regex instead. + * + * Examples: + * + * * ``abc`` matches the value ``abc.xyz`` + */ + 'prefix'?: (string); + /** + * The input string must have the suffix specified here. + * Note: empty prefix is not allowed, please use regex instead. + * + * Examples: + * + * * ``abc`` matches the value ``xyz.abc`` + */ + 'suffix'?: (string); + /** + * The input string must match the regular expression specified here. 
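As a quick illustration (not part of the generated output), here is a sketch of two StringMatcher values built from the fields above; the `safe_regex` field it uses is declared just below this point, and the import path is assumed.

```ts
import type { StringMatcher } from './generated/envoy/type/matcher/v3/StringMatcher';

// Case-insensitive prefix match.
const prefixMatch: StringMatcher = { prefix: 'abc', ignore_case: true };

// RE2 match via the safe_regex variant; the deprecated google_re2
// sub-message can simply be omitted.
const regexMatch: StringMatcher = {
  safe_regex: { regex: '^abc[0-9]+$' },
};
```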
+ */ + 'safe_regex'?: (_envoy_type_matcher_v3_RegexMatcher | null); + /** + * If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. This + * has no effect for the safe_regex match. + * For example, the matcher ``data`` will match both input string ``Data`` and ``data`` if set to true. + */ + 'ignore_case'?: (boolean); + /** + * The input string must have the substring specified here. + * Note: empty contains match is not allowed, please use regex instead. + * + * Examples: + * + * * ``abc`` matches the value ``xyz.abc.def`` + */ + 'contains'?: (string); + 'match_pattern'?: "exact"|"prefix"|"suffix"|"safe_regex"|"contains"; +} + +/** + * Specifies the way to match a string. + * [#next-free-field: 8] + */ +export interface StringMatcher__Output { + /** + * The input string must match exactly the string specified here. + * + * Examples: + * + * * ``abc`` only matches the value ``abc``. + */ + 'exact'?: (string); + /** + * The input string must have the prefix specified here. + * Note: empty prefix is not allowed, please use regex instead. + * + * Examples: + * + * * ``abc`` matches the value ``abc.xyz`` + */ + 'prefix'?: (string); + /** + * The input string must have the suffix specified here. + * Note: empty prefix is not allowed, please use regex instead. + * + * Examples: + * + * * ``abc`` matches the value ``xyz.abc`` + */ + 'suffix'?: (string); + /** + * The input string must match the regular expression specified here. + */ + 'safe_regex'?: (_envoy_type_matcher_v3_RegexMatcher__Output | null); + /** + * If true, indicates the exact/prefix/suffix/contains matching should be case insensitive. This + * has no effect for the safe_regex match. + * For example, the matcher ``data`` will match both input string ``Data`` and ``data`` if set to true. + */ + 'ignore_case': (boolean); + /** + * The input string must have the substring specified here. + * Note: empty contains match is not allowed, please use regex instead. + * + * Examples: + * + * * ``abc`` matches the value ``xyz.abc.def`` + */ + 'contains'?: (string); + 'match_pattern': "exact"|"prefix"|"suffix"|"safe_regex"|"contains"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/type/matcher/v3/StructMatcher.ts b/packages/grpc-js-xds/src/generated/envoy/type/matcher/v3/StructMatcher.ts new file mode 100644 index 000000000..22afe24a9 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/type/matcher/v3/StructMatcher.ts @@ -0,0 +1,153 @@ +// Original file: deps/envoy-api/envoy/type/matcher/v3/struct.proto + +import type { ValueMatcher as _envoy_type_matcher_v3_ValueMatcher, ValueMatcher__Output as _envoy_type_matcher_v3_ValueMatcher__Output } from '../../../../envoy/type/matcher/v3/ValueMatcher'; + +/** + * Specifies the segment in a path to retrieve value from Struct. + */ +export interface _envoy_type_matcher_v3_StructMatcher_PathSegment { + /** + * If specified, use the key to retrieve the value in a Struct. + */ + 'key'?: (string); + 'segment'?: "key"; +} + +/** + * Specifies the segment in a path to retrieve value from Struct. + */ +export interface _envoy_type_matcher_v3_StructMatcher_PathSegment__Output { + /** + * If specified, use the key to retrieve the value in a Struct. + */ + 'key'?: (string); + 'segment': "key"; +} + +/** + * StructMatcher provides a general interface to check if a given value is matched in + * google.protobuf.Struct. It uses ``path`` to retrieve the value + * from the struct and then check if it's matched to the specified value. 
+ * + * For example, for the following Struct: + * + * .. code-block:: yaml + * + * fields: + * a: + * struct_value: + * fields: + * b: + * struct_value: + * fields: + * c: + * string_value: pro + * t: + * list_value: + * values: + * - string_value: m + * - string_value: n + * + * The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro" + * from the Metadata which is matched to the specified prefix match. + * + * .. code-block:: yaml + * + * path: + * - key: a + * - key: b + * - key: c + * value: + * string_match: + * prefix: pr + * + * The following StructMatcher is matched as the code will match one of the string values in the + * list at the path [a, t]. + * + * .. code-block:: yaml + * + * path: + * - key: a + * - key: t + * value: + * list_match: + * one_of: + * string_match: + * exact: m + * + * An example use of StructMatcher is to match metadata in envoy.v*.core.Node. + */ +export interface StructMatcher { + /** + * The path to retrieve the Value from the Struct. + */ + 'path'?: (_envoy_type_matcher_v3_StructMatcher_PathSegment)[]; + /** + * The StructMatcher is matched if the value retrieved by path is matched to this value. + */ + 'value'?: (_envoy_type_matcher_v3_ValueMatcher | null); +} + +/** + * StructMatcher provides a general interface to check if a given value is matched in + * google.protobuf.Struct. It uses ``path`` to retrieve the value + * from the struct and then check if it's matched to the specified value. + * + * For example, for the following Struct: + * + * .. code-block:: yaml + * + * fields: + * a: + * struct_value: + * fields: + * b: + * struct_value: + * fields: + * c: + * string_value: pro + * t: + * list_value: + * values: + * - string_value: m + * - string_value: n + * + * The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro" + * from the Metadata which is matched to the specified prefix match. + * + * .. code-block:: yaml + * + * path: + * - key: a + * - key: b + * - key: c + * value: + * string_match: + * prefix: pr + * + * The following StructMatcher is matched as the code will match one of the string values in the + * list at the path [a, t]. + * + * .. code-block:: yaml + * + * path: + * - key: a + * - key: t + * value: + * list_match: + * one_of: + * string_match: + * exact: m + * + * An example use of StructMatcher is to match metadata in envoy.v*.core.Node. + */ +export interface StructMatcher__Output { + /** + * The path to retrieve the Value from the Struct. + */ + 'path': (_envoy_type_matcher_v3_StructMatcher_PathSegment__Output)[]; + /** + * The StructMatcher is matched if the value retrieved by path is matched to this value. 
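The first YAML example in the comment above translates directly into the generated TypeScript shape. A sketch (not part of the generated output, import path assumed):

```ts
import type { StructMatcher } from './generated/envoy/type/matcher/v3/StructMatcher';

// Retrieve the value at path [a, b, c] and require a string with prefix "pr",
// mirroring the first YAML example in the comment above.
const structMatcher: StructMatcher = {
  path: [{ key: 'a' }, { key: 'b' }, { key: 'c' }],
  value: { string_match: { prefix: 'pr' } },
};
```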
+ */ + 'value': (_envoy_type_matcher_v3_ValueMatcher__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/envoy/type/matcher/v3/ValueMatcher.ts b/packages/grpc-js-xds/src/generated/envoy/type/matcher/v3/ValueMatcher.ts new file mode 100644 index 000000000..d01060446 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/type/matcher/v3/ValueMatcher.ts @@ -0,0 +1,101 @@ +// Original file: deps/envoy-api/envoy/type/matcher/v3/value.proto + +import type { DoubleMatcher as _envoy_type_matcher_v3_DoubleMatcher, DoubleMatcher__Output as _envoy_type_matcher_v3_DoubleMatcher__Output } from '../../../../envoy/type/matcher/v3/DoubleMatcher'; +import type { StringMatcher as _envoy_type_matcher_v3_StringMatcher, StringMatcher__Output as _envoy_type_matcher_v3_StringMatcher__Output } from '../../../../envoy/type/matcher/v3/StringMatcher'; +import type { ListMatcher as _envoy_type_matcher_v3_ListMatcher, ListMatcher__Output as _envoy_type_matcher_v3_ListMatcher__Output } from '../../../../envoy/type/matcher/v3/ListMatcher'; + +/** + * NullMatch is an empty message to specify a null value. + */ +export interface _envoy_type_matcher_v3_ValueMatcher_NullMatch { +} + +/** + * NullMatch is an empty message to specify a null value. + */ +export interface _envoy_type_matcher_v3_ValueMatcher_NullMatch__Output { +} + +/** + * Specifies the way to match a ProtobufWkt::Value. Primitive values and ListValue are supported. + * StructValue is not supported and is always not matched. + * [#next-free-field: 7] + */ +export interface ValueMatcher { + /** + * If specified, a match occurs if and only if the target value is a NullValue. + */ + 'null_match'?: (_envoy_type_matcher_v3_ValueMatcher_NullMatch | null); + /** + * If specified, a match occurs if and only if the target value is a double value and is + * matched to this field. + */ + 'double_match'?: (_envoy_type_matcher_v3_DoubleMatcher | null); + /** + * If specified, a match occurs if and only if the target value is a string value and is + * matched to this field. + */ + 'string_match'?: (_envoy_type_matcher_v3_StringMatcher | null); + /** + * If specified, a match occurs if and only if the target value is a bool value and is equal + * to this field. + */ + 'bool_match'?: (boolean); + /** + * If specified, value match will be performed based on whether the path is referring to a + * valid primitive value in the metadata. If the path is referring to a non-primitive value, + * the result is always not matched. + */ + 'present_match'?: (boolean); + /** + * If specified, a match occurs if and only if the target value is a list value and + * is matched to this field. + */ + 'list_match'?: (_envoy_type_matcher_v3_ListMatcher | null); + /** + * Specifies how to match a value. + */ + 'match_pattern'?: "null_match"|"double_match"|"string_match"|"bool_match"|"present_match"|"list_match"; +} + +/** + * Specifies the way to match a ProtobufWkt::Value. Primitive values and ListValue are supported. + * StructValue is not supported and is always not matched. + * [#next-free-field: 7] + */ +export interface ValueMatcher__Output { + /** + * If specified, a match occurs if and only if the target value is a NullValue. + */ + 'null_match'?: (_envoy_type_matcher_v3_ValueMatcher_NullMatch__Output | null); + /** + * If specified, a match occurs if and only if the target value is a double value and is + * matched to this field. 
+ */ + 'double_match'?: (_envoy_type_matcher_v3_DoubleMatcher__Output | null); + /** + * If specified, a match occurs if and only if the target value is a string value and is + * matched to this field. + */ + 'string_match'?: (_envoy_type_matcher_v3_StringMatcher__Output | null); + /** + * If specified, a match occurs if and only if the target value is a bool value and is equal + * to this field. + */ + 'bool_match'?: (boolean); + /** + * If specified, value match will be performed based on whether the path is referring to a + * valid primitive value in the metadata. If the path is referring to a non-primitive value, + * the result is always not matched. + */ + 'present_match'?: (boolean); + /** + * If specified, a match occurs if and only if the target value is a list value and + * is matched to this field. + */ + 'list_match'?: (_envoy_type_matcher_v3_ListMatcher__Output | null); + /** + * Specifies how to match a value. + */ + 'match_pattern': "null_match"|"double_match"|"string_match"|"bool_match"|"present_match"|"list_match"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/type/metadata/v2/MetadataKind.ts b/packages/grpc-js-xds/src/generated/envoy/type/metadata/v2/MetadataKind.ts deleted file mode 100644 index 665b95f0d..000000000 --- a/packages/grpc-js-xds/src/generated/envoy/type/metadata/v2/MetadataKind.ts +++ /dev/null @@ -1,98 +0,0 @@ -// Original file: deps/envoy-api/envoy/type/metadata/v2/metadata.proto - - -/** - * Represents metadata from :ref:`the upstream cluster`. - */ -export interface _envoy_type_metadata_v2_MetadataKind_Cluster { -} - -/** - * Represents metadata from :ref:`the upstream cluster`. - */ -export interface _envoy_type_metadata_v2_MetadataKind_Cluster__Output { -} - -/** - * Represents metadata from :ref:`the upstream - * host`. - */ -export interface _envoy_type_metadata_v2_MetadataKind_Host { -} - -/** - * Represents metadata from :ref:`the upstream - * host`. - */ -export interface _envoy_type_metadata_v2_MetadataKind_Host__Output { -} - -/** - * Represents dynamic metadata associated with the request. - */ -export interface _envoy_type_metadata_v2_MetadataKind_Request { -} - -/** - * Represents dynamic metadata associated with the request. - */ -export interface _envoy_type_metadata_v2_MetadataKind_Request__Output { -} - -/** - * Represents metadata from :ref:`the route`. - */ -export interface _envoy_type_metadata_v2_MetadataKind_Route { -} - -/** - * Represents metadata from :ref:`the route`. - */ -export interface _envoy_type_metadata_v2_MetadataKind_Route__Output { -} - -/** - * Describes what kind of metadata. - */ -export interface MetadataKind { - /** - * Request kind of metadata. - */ - 'request'?: (_envoy_type_metadata_v2_MetadataKind_Request); - /** - * Route kind of metadata. - */ - 'route'?: (_envoy_type_metadata_v2_MetadataKind_Route); - /** - * Cluster kind of metadata. - */ - 'cluster'?: (_envoy_type_metadata_v2_MetadataKind_Cluster); - /** - * Host kind of metadata. - */ - 'host'?: (_envoy_type_metadata_v2_MetadataKind_Host); - 'kind'?: "request"|"route"|"cluster"|"host"; -} - -/** - * Describes what kind of metadata. - */ -export interface MetadataKind__Output { - /** - * Request kind of metadata. - */ - 'request'?: (_envoy_type_metadata_v2_MetadataKind_Request__Output); - /** - * Route kind of metadata. - */ - 'route'?: (_envoy_type_metadata_v2_MetadataKind_Route__Output); - /** - * Cluster kind of metadata. - */ - 'cluster'?: (_envoy_type_metadata_v2_MetadataKind_Cluster__Output); - /** - * Host kind of metadata. 
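Not part of the diff: a sketch of consuming the ValueMatcher__Output type defined above, dispatching on the required `match_pattern` discriminant that the __Output variants expose (import path assumed).

```ts
import type { ValueMatcher__Output } from './generated/envoy/type/matcher/v3/ValueMatcher';

// Dispatch on the oneof discriminant; the selected member is still
// optional/nullable, so guard before using it.
function describeValueMatcher(v: ValueMatcher__Output): string {
  switch (v.match_pattern) {
    case 'string_match':
      return `string match (ignore_case=${v.string_match?.ignore_case ?? false})`;
    case 'bool_match':
      return `bool match (${v.bool_match})`;
    case 'present_match':
      return `present match (${v.present_match})`;
    default:
      return v.match_pattern;
  }
}
```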
- */ - 'host'?: (_envoy_type_metadata_v2_MetadataKind_Host__Output); - 'kind': "request"|"route"|"cluster"|"host"; -} diff --git a/packages/grpc-js-xds/src/generated/envoy/type/metadata/v2/MetadataKey.ts b/packages/grpc-js-xds/src/generated/envoy/type/metadata/v3/MetadataKey.ts similarity index 79% rename from packages/grpc-js-xds/src/generated/envoy/type/metadata/v2/MetadataKey.ts rename to packages/grpc-js-xds/src/generated/envoy/type/metadata/v3/MetadataKey.ts index 94d661879..bc81233fc 100644 --- a/packages/grpc-js-xds/src/generated/envoy/type/metadata/v2/MetadataKey.ts +++ b/packages/grpc-js-xds/src/generated/envoy/type/metadata/v3/MetadataKey.ts @@ -1,11 +1,11 @@ -// Original file: deps/envoy-api/envoy/type/metadata/v2/metadata.proto +// Original file: deps/envoy-api/envoy/type/metadata/v3/metadata.proto /** * Specifies the segment in a path to retrieve value from Metadata. * Currently it is only supported to specify the key, i.e. field name, as one segment of a path. */ -export interface _envoy_type_metadata_v2_MetadataKey_PathSegment { +export interface _envoy_type_metadata_v3_MetadataKey_PathSegment { /** * If specified, use the key to retrieve the value in a Struct. */ @@ -17,7 +17,7 @@ export interface _envoy_type_metadata_v2_MetadataKey_PathSegment { * Specifies the segment in a path to retrieve value from Metadata. * Currently it is only supported to specify the key, i.e. field name, as one segment of a path. */ -export interface _envoy_type_metadata_v2_MetadataKey_PathSegment__Output { +export interface _envoy_type_metadata_v3_MetadataKey_PathSegment__Output { /** * If specified, use the key to retrieve the value in a Struct. */ @@ -26,8 +26,8 @@ export interface _envoy_type_metadata_v2_MetadataKey_PathSegment__Output { } /** - * MetadataKey provides a general interface using `key` and `path` to retrieve value from - * :ref:`Metadata `. + * MetadataKey provides a general interface using ``key`` and ``path`` to retrieve value from + * :ref:`Metadata `. * * For example, for the following Metadata: * @@ -63,12 +63,12 @@ export interface MetadataKey { * Note: Due to that only the key type segment is supported, the path can not specify a list * unless the list is the last segment. */ - 'path'?: (_envoy_type_metadata_v2_MetadataKey_PathSegment)[]; + 'path'?: (_envoy_type_metadata_v3_MetadataKey_PathSegment)[]; } /** - * MetadataKey provides a general interface using `key` and `path` to retrieve value from - * :ref:`Metadata `. + * MetadataKey provides a general interface using ``key`` and ``path`` to retrieve value from + * :ref:`Metadata `. * * For example, for the following Metadata: * @@ -104,5 +104,5 @@ export interface MetadataKey__Output { * Note: Due to that only the key type segment is supported, the path can not specify a list * unless the list is the last segment. */ - 'path': (_envoy_type_metadata_v2_MetadataKey_PathSegment__Output)[]; + 'path': (_envoy_type_metadata_v3_MetadataKey_PathSegment__Output)[]; } diff --git a/packages/grpc-js-xds/src/generated/envoy/type/metadata/v3/MetadataKind.ts b/packages/grpc-js-xds/src/generated/envoy/type/metadata/v3/MetadataKind.ts new file mode 100644 index 000000000..3ca368ccb --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/type/metadata/v3/MetadataKind.ts @@ -0,0 +1,98 @@ +// Original file: deps/envoy-api/envoy/type/metadata/v3/metadata.proto + + +/** + * Represents metadata from :ref:`the upstream cluster`. 
+ */ +export interface _envoy_type_metadata_v3_MetadataKind_Cluster { +} + +/** + * Represents metadata from :ref:`the upstream cluster`. + */ +export interface _envoy_type_metadata_v3_MetadataKind_Cluster__Output { +} + +/** + * Represents metadata from :ref:`the upstream + * host`. + */ +export interface _envoy_type_metadata_v3_MetadataKind_Host { +} + +/** + * Represents metadata from :ref:`the upstream + * host`. + */ +export interface _envoy_type_metadata_v3_MetadataKind_Host__Output { +} + +/** + * Represents dynamic metadata associated with the request. + */ +export interface _envoy_type_metadata_v3_MetadataKind_Request { +} + +/** + * Represents dynamic metadata associated with the request. + */ +export interface _envoy_type_metadata_v3_MetadataKind_Request__Output { +} + +/** + * Represents metadata from :ref:`the route`. + */ +export interface _envoy_type_metadata_v3_MetadataKind_Route { +} + +/** + * Represents metadata from :ref:`the route`. + */ +export interface _envoy_type_metadata_v3_MetadataKind_Route__Output { +} + +/** + * Describes what kind of metadata. + */ +export interface MetadataKind { + /** + * Request kind of metadata. + */ + 'request'?: (_envoy_type_metadata_v3_MetadataKind_Request | null); + /** + * Route kind of metadata. + */ + 'route'?: (_envoy_type_metadata_v3_MetadataKind_Route | null); + /** + * Cluster kind of metadata. + */ + 'cluster'?: (_envoy_type_metadata_v3_MetadataKind_Cluster | null); + /** + * Host kind of metadata. + */ + 'host'?: (_envoy_type_metadata_v3_MetadataKind_Host | null); + 'kind'?: "request"|"route"|"cluster"|"host"; +} + +/** + * Describes what kind of metadata. + */ +export interface MetadataKind__Output { + /** + * Request kind of metadata. + */ + 'request'?: (_envoy_type_metadata_v3_MetadataKind_Request__Output | null); + /** + * Route kind of metadata. + */ + 'route'?: (_envoy_type_metadata_v3_MetadataKind_Route__Output | null); + /** + * Cluster kind of metadata. + */ + 'cluster'?: (_envoy_type_metadata_v3_MetadataKind_Cluster__Output | null); + /** + * Host kind of metadata. 
+ */ + 'host'?: (_envoy_type_metadata_v3_MetadataKind_Host__Output | null); + 'kind': "request"|"route"|"cluster"|"host"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/type/tracing/v2/CustomTag.ts b/packages/grpc-js-xds/src/generated/envoy/type/tracing/v3/CustomTag.ts similarity index 64% rename from packages/grpc-js-xds/src/generated/envoy/type/tracing/v2/CustomTag.ts rename to packages/grpc-js-xds/src/generated/envoy/type/tracing/v3/CustomTag.ts index 3eed4cf44..34ac26f8c 100644 --- a/packages/grpc-js-xds/src/generated/envoy/type/tracing/v2/CustomTag.ts +++ b/packages/grpc-js-xds/src/generated/envoy/type/tracing/v3/CustomTag.ts @@ -1,12 +1,12 @@ -// Original file: deps/envoy-api/envoy/type/tracing/v2/custom_tag.proto +// Original file: deps/envoy-api/envoy/type/tracing/v3/custom_tag.proto -import type { MetadataKind as _envoy_type_metadata_v2_MetadataKind, MetadataKind__Output as _envoy_type_metadata_v2_MetadataKind__Output } from '../../../../envoy/type/metadata/v2/MetadataKind'; -import type { MetadataKey as _envoy_type_metadata_v2_MetadataKey, MetadataKey__Output as _envoy_type_metadata_v2_MetadataKey__Output } from '../../../../envoy/type/metadata/v2/MetadataKey'; +import type { MetadataKind as _envoy_type_metadata_v3_MetadataKind, MetadataKind__Output as _envoy_type_metadata_v3_MetadataKind__Output } from '../../../../envoy/type/metadata/v3/MetadataKind'; +import type { MetadataKey as _envoy_type_metadata_v3_MetadataKey, MetadataKey__Output as _envoy_type_metadata_v3_MetadataKey__Output } from '../../../../envoy/type/metadata/v3/MetadataKey'; /** * Environment type custom tag with environment name and default value. */ -export interface _envoy_type_tracing_v2_CustomTag_Environment { +export interface _envoy_type_tracing_v3_CustomTag_Environment { /** * Environment variable name to obtain the value to populate the tag value. */ @@ -22,7 +22,7 @@ export interface _envoy_type_tracing_v2_CustomTag_Environment { /** * Environment type custom tag with environment name and default value. */ -export interface _envoy_type_tracing_v2_CustomTag_Environment__Output { +export interface _envoy_type_tracing_v3_CustomTag_Environment__Output { /** * Environment variable name to obtain the value to populate the tag value. */ @@ -38,7 +38,7 @@ export interface _envoy_type_tracing_v2_CustomTag_Environment__Output { /** * Header type custom tag with header name and default value. */ -export interface _envoy_type_tracing_v2_CustomTag_Header { +export interface _envoy_type_tracing_v3_CustomTag_Header { /** * Header name to obtain the value to populate the tag value. */ @@ -54,7 +54,7 @@ export interface _envoy_type_tracing_v2_CustomTag_Header { /** * Header type custom tag with header name and default value. */ -export interface _envoy_type_tracing_v2_CustomTag_Header__Output { +export interface _envoy_type_tracing_v3_CustomTag_Header__Output { /** * Header name to obtain the value to populate the tag value. */ @@ -70,7 +70,7 @@ export interface _envoy_type_tracing_v2_CustomTag_Header__Output { /** * Literal type custom tag with static value for the tag value. */ -export interface _envoy_type_tracing_v2_CustomTag_Literal { +export interface _envoy_type_tracing_v3_CustomTag_Literal { /** * Static literal value to populate the tag value. */ @@ -80,7 +80,7 @@ export interface _envoy_type_tracing_v2_CustomTag_Literal { /** * Literal type custom tag with static value for the tag value. 
*/ -export interface _envoy_type_tracing_v2_CustomTag_Literal__Output { +export interface _envoy_type_tracing_v3_CustomTag_Literal__Output { /** * Static literal value to populate the tag value. */ @@ -89,20 +89,20 @@ export interface _envoy_type_tracing_v2_CustomTag_Literal__Output { /** * Metadata type custom tag using - * :ref:`MetadataKey ` to retrieve the protobuf value - * from :ref:`Metadata `, and populate the tag value with + * :ref:`MetadataKey ` to retrieve the protobuf value + * from :ref:`Metadata `, and populate the tag value with * `the canonical JSON `_ * representation of it. */ -export interface _envoy_type_tracing_v2_CustomTag_Metadata { +export interface _envoy_type_tracing_v3_CustomTag_Metadata { /** * Specify what kind of metadata to obtain tag value from. */ - 'kind'?: (_envoy_type_metadata_v2_MetadataKind); + 'kind'?: (_envoy_type_metadata_v3_MetadataKind | null); /** * Metadata key to define the path to retrieve the tag value. */ - 'metadata_key'?: (_envoy_type_metadata_v2_MetadataKey); + 'metadata_key'?: (_envoy_type_metadata_v3_MetadataKey | null); /** * When no valid metadata is found, * the tag value would be populated with this default value if specified, @@ -113,20 +113,20 @@ export interface _envoy_type_tracing_v2_CustomTag_Metadata { /** * Metadata type custom tag using - * :ref:`MetadataKey ` to retrieve the protobuf value - * from :ref:`Metadata `, and populate the tag value with + * :ref:`MetadataKey ` to retrieve the protobuf value + * from :ref:`Metadata `, and populate the tag value with * `the canonical JSON `_ * representation of it. */ -export interface _envoy_type_tracing_v2_CustomTag_Metadata__Output { +export interface _envoy_type_tracing_v3_CustomTag_Metadata__Output { /** * Specify what kind of metadata to obtain tag value from. */ - 'kind'?: (_envoy_type_metadata_v2_MetadataKind__Output); + 'kind': (_envoy_type_metadata_v3_MetadataKind__Output | null); /** * Metadata key to define the path to retrieve the tag value. */ - 'metadata_key'?: (_envoy_type_metadata_v2_MetadataKey__Output); + 'metadata_key': (_envoy_type_metadata_v3_MetadataKey__Output | null); /** * When no valid metadata is found, * the tag value would be populated with this default value if specified, @@ -147,19 +147,19 @@ export interface CustomTag { /** * A literal custom tag. */ - 'literal'?: (_envoy_type_tracing_v2_CustomTag_Literal); + 'literal'?: (_envoy_type_tracing_v3_CustomTag_Literal | null); /** * An environment custom tag. */ - 'environment'?: (_envoy_type_tracing_v2_CustomTag_Environment); + 'environment'?: (_envoy_type_tracing_v3_CustomTag_Environment | null); /** * A request header custom tag. */ - 'request_header'?: (_envoy_type_tracing_v2_CustomTag_Header); + 'request_header'?: (_envoy_type_tracing_v3_CustomTag_Header | null); /** * A custom tag to obtain tag value from the metadata. */ - 'metadata'?: (_envoy_type_tracing_v2_CustomTag_Metadata); + 'metadata'?: (_envoy_type_tracing_v3_CustomTag_Metadata | null); /** * Used to specify what kind of custom tag. */ @@ -178,19 +178,19 @@ export interface CustomTag__Output { /** * A literal custom tag. */ - 'literal'?: (_envoy_type_tracing_v2_CustomTag_Literal__Output); + 'literal'?: (_envoy_type_tracing_v3_CustomTag_Literal__Output | null); /** * An environment custom tag. */ - 'environment'?: (_envoy_type_tracing_v2_CustomTag_Environment__Output); + 'environment'?: (_envoy_type_tracing_v3_CustomTag_Environment__Output | null); /** * A request header custom tag. 
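For orientation (not part of the generated output), a sketch of a metadata-type CustomTag tying together MetadataKind and MetadataKey from this diff. The top-level `tag` field, MetadataKey's `key` field, and `default_value` belong to these generated files but fall outside the hunks shown here; the import path and the key/path values are hypothetical.

```ts
import type { CustomTag } from './generated/envoy/type/tracing/v3/CustomTag';

// Read route metadata at ["com.example", "version"] and
// fall back to "unknown" when no valid metadata is found.
const customTag: CustomTag = {
  tag: 'service.version',
  metadata: {
    kind: { route: {} },               // MetadataKind with the "route" variant
    metadata_key: {
      key: 'com.example',
      path: [{ key: 'version' }],
    },
    default_value: 'unknown',
  },
};
```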
*/ - 'request_header'?: (_envoy_type_tracing_v2_CustomTag_Header__Output); + 'request_header'?: (_envoy_type_tracing_v3_CustomTag_Header__Output | null); /** * A custom tag to obtain tag value from the metadata. */ - 'metadata'?: (_envoy_type_tracing_v2_CustomTag_Metadata__Output); + 'metadata'?: (_envoy_type_tracing_v3_CustomTag_Metadata__Output | null); /** * Used to specify what kind of custom tag. */ diff --git a/packages/grpc-js-xds/src/generated/envoy/type/v3/CodecClientType.ts b/packages/grpc-js-xds/src/generated/envoy/type/v3/CodecClientType.ts new file mode 100644 index 000000000..e05cdfb96 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/envoy/type/v3/CodecClientType.ts @@ -0,0 +1,27 @@ +// Original file: deps/envoy-api/envoy/type/v3/http.proto + +export const CodecClientType = { + HTTP1: 'HTTP1', + HTTP2: 'HTTP2', + /** + * [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with + * caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient + * to distinguish HTTP1 and HTTP2 traffic. + */ + HTTP3: 'HTTP3', +} as const; + +export type CodecClientType = + | 'HTTP1' + | 0 + | 'HTTP2' + | 1 + /** + * [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with + * caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient + * to distinguish HTTP1 and HTTP2 traffic. + */ + | 'HTTP3' + | 2 + +export type CodecClientType__Output = typeof CodecClientType[keyof typeof CodecClientType] diff --git a/packages/grpc-js-xds/src/generated/envoy/type/DoubleRange.ts b/packages/grpc-js-xds/src/generated/envoy/type/v3/DoubleRange.ts similarity index 82% rename from packages/grpc-js-xds/src/generated/envoy/type/DoubleRange.ts rename to packages/grpc-js-xds/src/generated/envoy/type/v3/DoubleRange.ts index 5ebc3a579..5d13bebdb 100644 --- a/packages/grpc-js-xds/src/generated/envoy/type/DoubleRange.ts +++ b/packages/grpc-js-xds/src/generated/envoy/type/v3/DoubleRange.ts @@ -1,4 +1,4 @@ -// Original file: deps/envoy-api/envoy/type/range.proto +// Original file: deps/envoy-api/envoy/type/v3/range.proto /** @@ -24,9 +24,9 @@ export interface DoubleRange__Output { /** * start of the range (inclusive) */ - 'start': (number | string); + 'start': (number); /** * end of the range (exclusive) */ - 'end': (number | string); + 'end': (number); } diff --git a/packages/grpc-js-xds/src/generated/envoy/type/FractionalPercent.ts b/packages/grpc-js-xds/src/generated/envoy/type/v3/FractionalPercent.ts similarity index 59% rename from packages/grpc-js-xds/src/generated/envoy/type/FractionalPercent.ts rename to packages/grpc-js-xds/src/generated/envoy/type/v3/FractionalPercent.ts index e450f0bfa..c45441a79 100644 --- a/packages/grpc-js-xds/src/generated/envoy/type/FractionalPercent.ts +++ b/packages/grpc-js-xds/src/generated/envoy/type/v3/FractionalPercent.ts @@ -1,31 +1,62 @@ -// Original file: deps/envoy-api/envoy/type/percent.proto +// Original file: deps/envoy-api/envoy/type/v3/percent.proto -// Original file: deps/envoy-api/envoy/type/percent.proto +// Original file: deps/envoy-api/envoy/type/v3/percent.proto /** * Fraction percentages support several fixed denominator values. */ -export enum _envoy_type_FractionalPercent_DenominatorType { +export const _envoy_type_v3_FractionalPercent_DenominatorType = { /** * 100. * * **Example**: 1/100 = 1%. */ - HUNDRED = 0, + HUNDRED: 'HUNDRED', /** * 10,000. * * **Example**: 1/10000 = 0.01%. 
*/ - TEN_THOUSAND = 1, + TEN_THOUSAND: 'TEN_THOUSAND', /** * 1,000,000. * * **Example**: 1/1000000 = 0.0001%. */ - MILLION = 2, -} + MILLION: 'MILLION', +} as const; + +/** + * Fraction percentages support several fixed denominator values. + */ +export type _envoy_type_v3_FractionalPercent_DenominatorType = + /** + * 100. + * + * **Example**: 1/100 = 1%. + */ + | 'HUNDRED' + | 0 + /** + * 10,000. + * + * **Example**: 1/10000 = 0.01%. + */ + | 'TEN_THOUSAND' + | 1 + /** + * 1,000,000. + * + * **Example**: 1/1000000 = 0.0001%. + */ + | 'MILLION' + | 2 + +/** + * Fraction percentages support several fixed denominator values. + */ +export type _envoy_type_v3_FractionalPercent_DenominatorType__Output = typeof _envoy_type_v3_FractionalPercent_DenominatorType[keyof typeof _envoy_type_v3_FractionalPercent_DenominatorType] /** * A fractional percentage is used in cases in which for performance reasons performing floating @@ -44,7 +75,7 @@ export interface FractionalPercent { * Specifies the denominator. If the denominator specified is less than the numerator, the final * fractional percentage is capped at 1 (100%). */ - 'denominator'?: (_envoy_type_FractionalPercent_DenominatorType | keyof typeof _envoy_type_FractionalPercent_DenominatorType); + 'denominator'?: (_envoy_type_v3_FractionalPercent_DenominatorType); } /** @@ -64,5 +95,5 @@ export interface FractionalPercent__Output { * Specifies the denominator. If the denominator specified is less than the numerator, the final * fractional percentage is capped at 1 (100%). */ - 'denominator': (keyof typeof _envoy_type_FractionalPercent_DenominatorType); + 'denominator': (_envoy_type_v3_FractionalPercent_DenominatorType__Output); } diff --git a/packages/grpc-js-xds/src/generated/envoy/type/Int32Range.ts b/packages/grpc-js-xds/src/generated/envoy/type/v3/Int32Range.ts similarity index 90% rename from packages/grpc-js-xds/src/generated/envoy/type/Int32Range.ts rename to packages/grpc-js-xds/src/generated/envoy/type/v3/Int32Range.ts index f5475c2db..826af6c38 100644 --- a/packages/grpc-js-xds/src/generated/envoy/type/Int32Range.ts +++ b/packages/grpc-js-xds/src/generated/envoy/type/v3/Int32Range.ts @@ -1,4 +1,4 @@ -// Original file: deps/envoy-api/envoy/type/range.proto +// Original file: deps/envoy-api/envoy/type/v3/range.proto /** diff --git a/packages/grpc-js-xds/src/generated/envoy/type/Int64Range.ts b/packages/grpc-js-xds/src/generated/envoy/type/v3/Int64Range.ts similarity index 91% rename from packages/grpc-js-xds/src/generated/envoy/type/Int64Range.ts rename to packages/grpc-js-xds/src/generated/envoy/type/v3/Int64Range.ts index f9664cba4..89ce7fb8d 100644 --- a/packages/grpc-js-xds/src/generated/envoy/type/Int64Range.ts +++ b/packages/grpc-js-xds/src/generated/envoy/type/v3/Int64Range.ts @@ -1,4 +1,4 @@ -// Original file: deps/envoy-api/envoy/type/range.proto +// Original file: deps/envoy-api/envoy/type/v3/range.proto import type { Long } from '@grpc/proto-loader'; diff --git a/packages/grpc-js-xds/src/generated/envoy/type/Percent.ts b/packages/grpc-js-xds/src/generated/envoy/type/v3/Percent.ts similarity index 71% rename from packages/grpc-js-xds/src/generated/envoy/type/Percent.ts rename to packages/grpc-js-xds/src/generated/envoy/type/v3/Percent.ts index f63553acd..01d236c4e 100644 --- a/packages/grpc-js-xds/src/generated/envoy/type/Percent.ts +++ b/packages/grpc-js-xds/src/generated/envoy/type/v3/Percent.ts @@ -1,4 +1,4 @@ -// Original file: deps/envoy-api/envoy/type/percent.proto +// Original file: deps/envoy-api/envoy/type/v3/percent.proto 
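Worth illustrating (not part of the generated output): with the const-object enum representation introduced above, request messages accept either the string name or the numeric value, while deserialized `__Output` messages always carry the string name. The `numerator` field is part of FractionalPercent but lies outside the hunks shown; the import path is assumed.

```ts
import type { FractionalPercent, FractionalPercent__Output } from './generated/envoy/type/v3/FractionalPercent';

// Requests may use the string name or the numeric value...
const half: FractionalPercent = { numerator: 50, denominator: 'HUNDRED' };
const alsoHalf: FractionalPercent = { numerator: 5000, denominator: 1 }; // 1 === TEN_THOUSAND

// ...while __Output messages always carry the string name.
function toFraction(p: FractionalPercent__Output): number {
  switch (p.denominator) {
    case 'TEN_THOUSAND':
      return p.numerator / 10_000;
    case 'MILLION':
      return p.numerator / 1_000_000;
    default:
      return p.numerator / 100; // 'HUNDRED'
  }
}
```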
/** @@ -12,5 +12,5 @@ export interface Percent { * Identifies a percentage, in the range [0.0, 100.0]. */ export interface Percent__Output { - 'value': (number | string); + 'value': (number); } diff --git a/packages/grpc-js-xds/src/generated/envoy/type/SemanticVersion.ts b/packages/grpc-js-xds/src/generated/envoy/type/v3/SemanticVersion.ts similarity index 90% rename from packages/grpc-js-xds/src/generated/envoy/type/SemanticVersion.ts rename to packages/grpc-js-xds/src/generated/envoy/type/v3/SemanticVersion.ts index f99431703..3f714a766 100644 --- a/packages/grpc-js-xds/src/generated/envoy/type/SemanticVersion.ts +++ b/packages/grpc-js-xds/src/generated/envoy/type/v3/SemanticVersion.ts @@ -1,4 +1,4 @@ -// Original file: deps/envoy-api/envoy/type/semantic_version.proto +// Original file: deps/envoy-api/envoy/type/v3/semantic_version.proto /** diff --git a/packages/grpc-js-xds/src/generated/fault.ts b/packages/grpc-js-xds/src/generated/fault.ts new file mode 100644 index 000000000..4ec7ed078 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/fault.ts @@ -0,0 +1,242 @@ +import type * as grpc from '@grpc/grpc-js'; +import type { EnumTypeDefinition, MessageTypeDefinition } from '@grpc/proto-loader'; + + +type SubtypeConstructor any, Subtype> = { + new(...args: ConstructorParameters): Subtype; +}; + +export interface ProtoGrpcType { + envoy: { + annotations: { + } + config: { + core: { + v3: { + Address: MessageTypeDefinition + AsyncDataSource: MessageTypeDefinition + BackoffStrategy: MessageTypeDefinition + BindConfig: MessageTypeDefinition + BuildVersion: MessageTypeDefinition + CidrRange: MessageTypeDefinition + ControlPlane: MessageTypeDefinition + DataSource: MessageTypeDefinition + EnvoyInternalAddress: MessageTypeDefinition + Extension: MessageTypeDefinition + ExtraSourceAddress: MessageTypeDefinition + HeaderMap: MessageTypeDefinition + HeaderValue: MessageTypeDefinition + HeaderValueOption: MessageTypeDefinition + HttpUri: MessageTypeDefinition + Locality: MessageTypeDefinition + Metadata: MessageTypeDefinition + Node: MessageTypeDefinition + Pipe: MessageTypeDefinition + ProxyProtocolConfig: MessageTypeDefinition + ProxyProtocolPassThroughTLVs: MessageTypeDefinition + QueryParameter: MessageTypeDefinition + RemoteDataSource: MessageTypeDefinition + RequestMethod: EnumTypeDefinition + RetryPolicy: MessageTypeDefinition + RoutingPriority: EnumTypeDefinition + RuntimeDouble: MessageTypeDefinition + RuntimeFeatureFlag: MessageTypeDefinition + RuntimeFractionalPercent: MessageTypeDefinition + RuntimePercent: MessageTypeDefinition + RuntimeUInt32: MessageTypeDefinition + SocketAddress: MessageTypeDefinition + SocketOption: MessageTypeDefinition + SocketOptionsOverride: MessageTypeDefinition + TcpKeepalive: MessageTypeDefinition + TrafficDirection: EnumTypeDefinition + TransportSocket: MessageTypeDefinition + TypedExtensionConfig: MessageTypeDefinition + WatchedDirectory: MessageTypeDefinition + } + } + route: { + v3: { + ClusterSpecifierPlugin: MessageTypeDefinition + CorsPolicy: MessageTypeDefinition + Decorator: MessageTypeDefinition + DirectResponseAction: MessageTypeDefinition + FilterAction: MessageTypeDefinition + FilterConfig: MessageTypeDefinition + HeaderMatcher: MessageTypeDefinition + HedgePolicy: MessageTypeDefinition + InternalRedirectPolicy: MessageTypeDefinition + NonForwardingAction: MessageTypeDefinition + QueryParameterMatcher: MessageTypeDefinition + RateLimit: MessageTypeDefinition + RedirectAction: MessageTypeDefinition + RetryPolicy: MessageTypeDefinition + Route: 
MessageTypeDefinition + RouteAction: MessageTypeDefinition + RouteList: MessageTypeDefinition + RouteMatch: MessageTypeDefinition + Tracing: MessageTypeDefinition + VirtualCluster: MessageTypeDefinition + VirtualHost: MessageTypeDefinition + WeightedCluster: MessageTypeDefinition + } + } + } + extensions: { + filters: { + common: { + fault: { + v3: { + FaultDelay: MessageTypeDefinition + FaultRateLimit: MessageTypeDefinition + } + } + } + http: { + fault: { + v3: { + FaultAbort: MessageTypeDefinition + HTTPFault: MessageTypeDefinition + } + } + } + } + } + type: { + matcher: { + v3: { + DoubleMatcher: MessageTypeDefinition + ListMatcher: MessageTypeDefinition + ListStringMatcher: MessageTypeDefinition + MetadataMatcher: MessageTypeDefinition + RegexMatchAndSubstitute: MessageTypeDefinition + RegexMatcher: MessageTypeDefinition + StringMatcher: MessageTypeDefinition + ValueMatcher: MessageTypeDefinition + } + } + metadata: { + v3: { + MetadataKey: MessageTypeDefinition + MetadataKind: MessageTypeDefinition + } + } + tracing: { + v3: { + CustomTag: MessageTypeDefinition + } + } + v3: { + DoubleRange: MessageTypeDefinition + FractionalPercent: MessageTypeDefinition + Int32Range: MessageTypeDefinition + Int64Range: MessageTypeDefinition + Percent: MessageTypeDefinition + SemanticVersion: MessageTypeDefinition + } + } + } + google: { + protobuf: { + Any: MessageTypeDefinition + BoolValue: MessageTypeDefinition + BytesValue: MessageTypeDefinition + DescriptorProto: MessageTypeDefinition + DoubleValue: MessageTypeDefinition + Duration: MessageTypeDefinition + EnumDescriptorProto: MessageTypeDefinition + EnumOptions: MessageTypeDefinition + EnumValueDescriptorProto: MessageTypeDefinition + EnumValueOptions: MessageTypeDefinition + FieldDescriptorProto: MessageTypeDefinition + FieldOptions: MessageTypeDefinition + FileDescriptorProto: MessageTypeDefinition + FileDescriptorSet: MessageTypeDefinition + FileOptions: MessageTypeDefinition + FloatValue: MessageTypeDefinition + GeneratedCodeInfo: MessageTypeDefinition + Int32Value: MessageTypeDefinition + Int64Value: MessageTypeDefinition + ListValue: MessageTypeDefinition + MessageOptions: MessageTypeDefinition + MethodDescriptorProto: MessageTypeDefinition + MethodOptions: MessageTypeDefinition + NullValue: EnumTypeDefinition + OneofDescriptorProto: MessageTypeDefinition + OneofOptions: MessageTypeDefinition + ServiceDescriptorProto: MessageTypeDefinition + ServiceOptions: MessageTypeDefinition + SourceCodeInfo: MessageTypeDefinition + StringValue: MessageTypeDefinition + Struct: MessageTypeDefinition + Timestamp: MessageTypeDefinition + UInt32Value: MessageTypeDefinition + UInt64Value: MessageTypeDefinition + UninterpretedOption: MessageTypeDefinition + Value: MessageTypeDefinition + } + } + udpa: { + annotations: { + FieldMigrateAnnotation: MessageTypeDefinition + FileMigrateAnnotation: MessageTypeDefinition + MigrateAnnotation: MessageTypeDefinition + PackageVersionStatus: EnumTypeDefinition + StatusAnnotation: MessageTypeDefinition + VersioningAnnotation: MessageTypeDefinition + } + } + validate: { + AnyRules: MessageTypeDefinition + BoolRules: MessageTypeDefinition + BytesRules: MessageTypeDefinition + DoubleRules: MessageTypeDefinition + DurationRules: MessageTypeDefinition + EnumRules: MessageTypeDefinition + FieldRules: MessageTypeDefinition + Fixed32Rules: MessageTypeDefinition + Fixed64Rules: MessageTypeDefinition + FloatRules: MessageTypeDefinition + Int32Rules: MessageTypeDefinition + Int64Rules: MessageTypeDefinition + KnownRegex: 
EnumTypeDefinition + MapRules: MessageTypeDefinition + MessageRules: MessageTypeDefinition + RepeatedRules: MessageTypeDefinition + SFixed32Rules: MessageTypeDefinition + SFixed64Rules: MessageTypeDefinition + SInt32Rules: MessageTypeDefinition + SInt64Rules: MessageTypeDefinition + StringRules: MessageTypeDefinition + TimestampRules: MessageTypeDefinition + UInt32Rules: MessageTypeDefinition + UInt64Rules: MessageTypeDefinition + } + xds: { + annotations: { + v3: { + FieldStatusAnnotation: MessageTypeDefinition + FileStatusAnnotation: MessageTypeDefinition + MessageStatusAnnotation: MessageTypeDefinition + PackageVersionStatus: EnumTypeDefinition + StatusAnnotation: MessageTypeDefinition + } + } + core: { + v3: { + ContextParams: MessageTypeDefinition + TypedExtensionConfig: MessageTypeDefinition + } + } + type: { + matcher: { + v3: { + ListStringMatcher: MessageTypeDefinition + Matcher: MessageTypeDefinition + RegexMatcher: MessageTypeDefinition + StringMatcher: MessageTypeDefinition + } + } + } + } +} + diff --git a/packages/grpc-js-xds/src/generated/google/api/HttpRule.ts b/packages/grpc-js-xds/src/generated/google/api/HttpRule.ts index 21ad897ee..243a99f80 100644 --- a/packages/grpc-js-xds/src/generated/google/api/HttpRule.ts +++ b/packages/grpc-js-xds/src/generated/google/api/HttpRule.ts @@ -317,7 +317,7 @@ export interface HttpRule { * HTTP method unspecified for this rule. The wild-card rule is useful * for services that provide content to Web (HTML) clients. */ - 'custom'?: (_google_api_CustomHttpPattern); + 'custom'?: (_google_api_CustomHttpPattern | null); /** * Additional HTTP bindings for the selector. Nested bindings must * not contain an `additional_bindings` field themselves (that is, @@ -655,7 +655,7 @@ export interface HttpRule__Output { * HTTP method unspecified for this rule. The wild-card rule is useful * for services that provide content to Web (HTML) clients. */ - 'custom'?: (_google_api_CustomHttpPattern__Output); + 'custom'?: (_google_api_CustomHttpPattern__Output | null); /** * Additional HTTP bindings for the selector. 
Nested bindings must * not contain an `additional_bindings` field themselves (that is, diff --git a/packages/grpc-js-xds/src/generated/google/protobuf/Any.ts b/packages/grpc-js-xds/src/generated/google/protobuf/Any.ts index fe0d05f12..fcaa6724e 100644 --- a/packages/grpc-js-xds/src/generated/google/protobuf/Any.ts +++ b/packages/grpc-js-xds/src/generated/google/protobuf/Any.ts @@ -7,7 +7,7 @@ export type Any = AnyExtension | { value: Buffer | Uint8Array | string; } -export type Any__Output = AnyExtension | { - type_url: string; - value: Buffer; +export interface Any__Output { + 'type_url': (string); + 'value': (Buffer); } diff --git a/packages/grpc-js-xds/src/generated/google/protobuf/DescriptorProto.ts b/packages/grpc-js-xds/src/generated/google/protobuf/DescriptorProto.ts index 2f6f9f0cc..f729437f4 100644 --- a/packages/grpc-js-xds/src/generated/google/protobuf/DescriptorProto.ts +++ b/packages/grpc-js-xds/src/generated/google/protobuf/DescriptorProto.ts @@ -33,7 +33,7 @@ export interface DescriptorProto { 'enumType'?: (_google_protobuf_EnumDescriptorProto)[]; 'extensionRange'?: (_google_protobuf_DescriptorProto_ExtensionRange)[]; 'extension'?: (_google_protobuf_FieldDescriptorProto)[]; - 'options'?: (_google_protobuf_MessageOptions); + 'options'?: (_google_protobuf_MessageOptions | null); 'oneofDecl'?: (_google_protobuf_OneofDescriptorProto)[]; 'reservedRange'?: (_google_protobuf_DescriptorProto_ReservedRange)[]; 'reservedName'?: (string)[]; @@ -46,7 +46,7 @@ export interface DescriptorProto__Output { 'enumType': (_google_protobuf_EnumDescriptorProto__Output)[]; 'extensionRange': (_google_protobuf_DescriptorProto_ExtensionRange__Output)[]; 'extension': (_google_protobuf_FieldDescriptorProto__Output)[]; - 'options'?: (_google_protobuf_MessageOptions__Output); + 'options': (_google_protobuf_MessageOptions__Output | null); 'oneofDecl': (_google_protobuf_OneofDescriptorProto__Output)[]; 'reservedRange': (_google_protobuf_DescriptorProto_ReservedRange__Output)[]; 'reservedName': (string)[]; diff --git a/packages/grpc-js-xds/src/generated/google/protobuf/DoubleValue.ts b/packages/grpc-js-xds/src/generated/google/protobuf/DoubleValue.ts index e4f2eb4b8..d70b303c2 100644 --- a/packages/grpc-js-xds/src/generated/google/protobuf/DoubleValue.ts +++ b/packages/grpc-js-xds/src/generated/google/protobuf/DoubleValue.ts @@ -6,5 +6,5 @@ export interface DoubleValue { } export interface DoubleValue__Output { - 'value': (number | string); + 'value': (number); } diff --git a/packages/grpc-js-xds/src/generated/google/protobuf/EnumDescriptorProto.ts b/packages/grpc-js-xds/src/generated/google/protobuf/EnumDescriptorProto.ts index 7aa40ce4d..dc4c9673e 100644 --- a/packages/grpc-js-xds/src/generated/google/protobuf/EnumDescriptorProto.ts +++ b/packages/grpc-js-xds/src/generated/google/protobuf/EnumDescriptorProto.ts @@ -6,11 +6,11 @@ import type { EnumOptions as _google_protobuf_EnumOptions, EnumOptions__Output a export interface EnumDescriptorProto { 'name'?: (string); 'value'?: (_google_protobuf_EnumValueDescriptorProto)[]; - 'options'?: (_google_protobuf_EnumOptions); + 'options'?: (_google_protobuf_EnumOptions | null); } export interface EnumDescriptorProto__Output { 'name': (string); 'value': (_google_protobuf_EnumValueDescriptorProto__Output)[]; - 'options'?: (_google_protobuf_EnumOptions__Output); + 'options': (_google_protobuf_EnumOptions__Output | null); } diff --git a/packages/grpc-js-xds/src/generated/google/protobuf/EnumOptions.ts b/packages/grpc-js-xds/src/generated/google/protobuf/EnumOptions.ts 
index b92f699a0..b92ade4f9 100644 --- a/packages/grpc-js-xds/src/generated/google/protobuf/EnumOptions.ts +++ b/packages/grpc-js-xds/src/generated/google/protobuf/EnumOptions.ts @@ -1,18 +1,15 @@ // Original file: null import type { UninterpretedOption as _google_protobuf_UninterpretedOption, UninterpretedOption__Output as _google_protobuf_UninterpretedOption__Output } from '../../google/protobuf/UninterpretedOption'; -import type { MigrateAnnotation as _udpa_annotations_MigrateAnnotation, MigrateAnnotation__Output as _udpa_annotations_MigrateAnnotation__Output } from '../../udpa/annotations/MigrateAnnotation'; export interface EnumOptions { 'allowAlias'?: (boolean); 'deprecated'?: (boolean); 'uninterpretedOption'?: (_google_protobuf_UninterpretedOption)[]; - '.udpa.annotations.enum_migrate'?: (_udpa_annotations_MigrateAnnotation); } export interface EnumOptions__Output { 'allowAlias': (boolean); 'deprecated': (boolean); 'uninterpretedOption': (_google_protobuf_UninterpretedOption__Output)[]; - '.udpa.annotations.enum_migrate'?: (_udpa_annotations_MigrateAnnotation__Output); } diff --git a/packages/grpc-js-xds/src/generated/google/protobuf/EnumValueDescriptorProto.ts b/packages/grpc-js-xds/src/generated/google/protobuf/EnumValueDescriptorProto.ts index 238e7fd01..7f8e57ea5 100644 --- a/packages/grpc-js-xds/src/generated/google/protobuf/EnumValueDescriptorProto.ts +++ b/packages/grpc-js-xds/src/generated/google/protobuf/EnumValueDescriptorProto.ts @@ -5,11 +5,11 @@ import type { EnumValueOptions as _google_protobuf_EnumValueOptions, EnumValueOp export interface EnumValueDescriptorProto { 'name'?: (string); 'number'?: (number); - 'options'?: (_google_protobuf_EnumValueOptions); + 'options'?: (_google_protobuf_EnumValueOptions | null); } export interface EnumValueDescriptorProto__Output { 'name': (string); 'number': (number); - 'options'?: (_google_protobuf_EnumValueOptions__Output); + 'options': (_google_protobuf_EnumValueOptions__Output | null); } diff --git a/packages/grpc-js-xds/src/generated/google/protobuf/EnumValueOptions.ts b/packages/grpc-js-xds/src/generated/google/protobuf/EnumValueOptions.ts index db2770534..e60ee6f4c 100644 --- a/packages/grpc-js-xds/src/generated/google/protobuf/EnumValueOptions.ts +++ b/packages/grpc-js-xds/src/generated/google/protobuf/EnumValueOptions.ts @@ -1,18 +1,13 @@ // Original file: null import type { UninterpretedOption as _google_protobuf_UninterpretedOption, UninterpretedOption__Output as _google_protobuf_UninterpretedOption__Output } from '../../google/protobuf/UninterpretedOption'; -import type { MigrateAnnotation as _udpa_annotations_MigrateAnnotation, MigrateAnnotation__Output as _udpa_annotations_MigrateAnnotation__Output } from '../../udpa/annotations/MigrateAnnotation'; export interface EnumValueOptions { 'deprecated'?: (boolean); 'uninterpretedOption'?: (_google_protobuf_UninterpretedOption)[]; - '.envoy.annotations.disallowed_by_default_enum'?: (boolean); - '.udpa.annotations.enum_value_migrate'?: (_udpa_annotations_MigrateAnnotation); } export interface EnumValueOptions__Output { 'deprecated': (boolean); 'uninterpretedOption': (_google_protobuf_UninterpretedOption__Output)[]; - '.envoy.annotations.disallowed_by_default_enum': (boolean); - '.udpa.annotations.enum_value_migrate'?: (_udpa_annotations_MigrateAnnotation__Output); } diff --git a/packages/grpc-js-xds/src/generated/google/protobuf/FieldDescriptorProto.ts b/packages/grpc-js-xds/src/generated/google/protobuf/FieldDescriptorProto.ts index b59518c4b..4951919fd 100644 --- 
a/packages/grpc-js-xds/src/generated/google/protobuf/FieldDescriptorProto.ts +++ b/packages/grpc-js-xds/src/generated/google/protobuf/FieldDescriptorProto.ts @@ -4,44 +4,94 @@ import type { FieldOptions as _google_protobuf_FieldOptions, FieldOptions__Outpu // Original file: null -export enum _google_protobuf_FieldDescriptorProto_Label { - LABEL_OPTIONAL = 1, - LABEL_REQUIRED = 2, - LABEL_REPEATED = 3, -} +export const _google_protobuf_FieldDescriptorProto_Label = { + LABEL_OPTIONAL: 'LABEL_OPTIONAL', + LABEL_REQUIRED: 'LABEL_REQUIRED', + LABEL_REPEATED: 'LABEL_REPEATED', +} as const; + +export type _google_protobuf_FieldDescriptorProto_Label = + | 'LABEL_OPTIONAL' + | 1 + | 'LABEL_REQUIRED' + | 2 + | 'LABEL_REPEATED' + | 3 + +export type _google_protobuf_FieldDescriptorProto_Label__Output = typeof _google_protobuf_FieldDescriptorProto_Label[keyof typeof _google_protobuf_FieldDescriptorProto_Label] // Original file: null -export enum _google_protobuf_FieldDescriptorProto_Type { - TYPE_DOUBLE = 1, - TYPE_FLOAT = 2, - TYPE_INT64 = 3, - TYPE_UINT64 = 4, - TYPE_INT32 = 5, - TYPE_FIXED64 = 6, - TYPE_FIXED32 = 7, - TYPE_BOOL = 8, - TYPE_STRING = 9, - TYPE_GROUP = 10, - TYPE_MESSAGE = 11, - TYPE_BYTES = 12, - TYPE_UINT32 = 13, - TYPE_ENUM = 14, - TYPE_SFIXED32 = 15, - TYPE_SFIXED64 = 16, - TYPE_SINT32 = 17, - TYPE_SINT64 = 18, -} +export const _google_protobuf_FieldDescriptorProto_Type = { + TYPE_DOUBLE: 'TYPE_DOUBLE', + TYPE_FLOAT: 'TYPE_FLOAT', + TYPE_INT64: 'TYPE_INT64', + TYPE_UINT64: 'TYPE_UINT64', + TYPE_INT32: 'TYPE_INT32', + TYPE_FIXED64: 'TYPE_FIXED64', + TYPE_FIXED32: 'TYPE_FIXED32', + TYPE_BOOL: 'TYPE_BOOL', + TYPE_STRING: 'TYPE_STRING', + TYPE_GROUP: 'TYPE_GROUP', + TYPE_MESSAGE: 'TYPE_MESSAGE', + TYPE_BYTES: 'TYPE_BYTES', + TYPE_UINT32: 'TYPE_UINT32', + TYPE_ENUM: 'TYPE_ENUM', + TYPE_SFIXED32: 'TYPE_SFIXED32', + TYPE_SFIXED64: 'TYPE_SFIXED64', + TYPE_SINT32: 'TYPE_SINT32', + TYPE_SINT64: 'TYPE_SINT64', +} as const; + +export type _google_protobuf_FieldDescriptorProto_Type = + | 'TYPE_DOUBLE' + | 1 + | 'TYPE_FLOAT' + | 2 + | 'TYPE_INT64' + | 3 + | 'TYPE_UINT64' + | 4 + | 'TYPE_INT32' + | 5 + | 'TYPE_FIXED64' + | 6 + | 'TYPE_FIXED32' + | 7 + | 'TYPE_BOOL' + | 8 + | 'TYPE_STRING' + | 9 + | 'TYPE_GROUP' + | 10 + | 'TYPE_MESSAGE' + | 11 + | 'TYPE_BYTES' + | 12 + | 'TYPE_UINT32' + | 13 + | 'TYPE_ENUM' + | 14 + | 'TYPE_SFIXED32' + | 15 + | 'TYPE_SFIXED64' + | 16 + | 'TYPE_SINT32' + | 17 + | 'TYPE_SINT64' + | 18 + +export type _google_protobuf_FieldDescriptorProto_Type__Output = typeof _google_protobuf_FieldDescriptorProto_Type[keyof typeof _google_protobuf_FieldDescriptorProto_Type] export interface FieldDescriptorProto { 'name'?: (string); 'extendee'?: (string); 'number'?: (number); - 'label'?: (_google_protobuf_FieldDescriptorProto_Label | keyof typeof _google_protobuf_FieldDescriptorProto_Label); - 'type'?: (_google_protobuf_FieldDescriptorProto_Type | keyof typeof _google_protobuf_FieldDescriptorProto_Type); + 'label'?: (_google_protobuf_FieldDescriptorProto_Label); + 'type'?: (_google_protobuf_FieldDescriptorProto_Type); 'typeName'?: (string); 'defaultValue'?: (string); - 'options'?: (_google_protobuf_FieldOptions); + 'options'?: (_google_protobuf_FieldOptions | null); 'oneofIndex'?: (number); 'jsonName'?: (string); } @@ -50,11 +100,11 @@ export interface FieldDescriptorProto__Output { 'name': (string); 'extendee': (string); 'number': (number); - 'label': (keyof typeof _google_protobuf_FieldDescriptorProto_Label); - 'type': (keyof typeof _google_protobuf_FieldDescriptorProto_Type); + 
'label': (_google_protobuf_FieldDescriptorProto_Label__Output); + 'type': (_google_protobuf_FieldDescriptorProto_Type__Output); 'typeName': (string); 'defaultValue': (string); - 'options'?: (_google_protobuf_FieldOptions__Output); + 'options': (_google_protobuf_FieldOptions__Output | null); 'oneofIndex': (number); 'jsonName': (string); } diff --git a/packages/grpc-js-xds/src/generated/google/protobuf/FieldOptions.ts b/packages/grpc-js-xds/src/generated/google/protobuf/FieldOptions.ts index b76a60815..b301f2958 100644 --- a/packages/grpc-js-xds/src/generated/google/protobuf/FieldOptions.ts +++ b/packages/grpc-js-xds/src/generated/google/protobuf/FieldOptions.ts @@ -1,49 +1,59 @@ // Original file: null import type { UninterpretedOption as _google_protobuf_UninterpretedOption, UninterpretedOption__Output as _google_protobuf_UninterpretedOption__Output } from '../../google/protobuf/UninterpretedOption'; -import type { FieldRules as _validate_FieldRules, FieldRules__Output as _validate_FieldRules__Output } from '../../validate/FieldRules'; -import type { FieldMigrateAnnotation as _udpa_annotations_FieldMigrateAnnotation, FieldMigrateAnnotation__Output as _udpa_annotations_FieldMigrateAnnotation__Output } from '../../udpa/annotations/FieldMigrateAnnotation'; // Original file: null -export enum _google_protobuf_FieldOptions_CType { - STRING = 0, - CORD = 1, - STRING_PIECE = 2, -} +export const _google_protobuf_FieldOptions_CType = { + STRING: 'STRING', + CORD: 'CORD', + STRING_PIECE: 'STRING_PIECE', +} as const; + +export type _google_protobuf_FieldOptions_CType = + | 'STRING' + | 0 + | 'CORD' + | 1 + | 'STRING_PIECE' + | 2 + +export type _google_protobuf_FieldOptions_CType__Output = typeof _google_protobuf_FieldOptions_CType[keyof typeof _google_protobuf_FieldOptions_CType] // Original file: null -export enum _google_protobuf_FieldOptions_JSType { - JS_NORMAL = 0, - JS_STRING = 1, - JS_NUMBER = 2, -} +export const _google_protobuf_FieldOptions_JSType = { + JS_NORMAL: 'JS_NORMAL', + JS_STRING: 'JS_STRING', + JS_NUMBER: 'JS_NUMBER', +} as const; + +export type _google_protobuf_FieldOptions_JSType = + | 'JS_NORMAL' + | 0 + | 'JS_STRING' + | 1 + | 'JS_NUMBER' + | 2 + +export type _google_protobuf_FieldOptions_JSType__Output = typeof _google_protobuf_FieldOptions_JSType[keyof typeof _google_protobuf_FieldOptions_JSType] export interface FieldOptions { - 'ctype'?: (_google_protobuf_FieldOptions_CType | keyof typeof _google_protobuf_FieldOptions_CType); + 'ctype'?: (_google_protobuf_FieldOptions_CType); 'packed'?: (boolean); 'deprecated'?: (boolean); 'lazy'?: (boolean); - 'jstype'?: (_google_protobuf_FieldOptions_JSType | keyof typeof _google_protobuf_FieldOptions_JSType); + 'jstype'?: (_google_protobuf_FieldOptions_JSType); 'weak'?: (boolean); 'uninterpretedOption'?: (_google_protobuf_UninterpretedOption)[]; - '.validate.rules'?: (_validate_FieldRules); - '.udpa.annotations.sensitive'?: (boolean); - '.udpa.annotations.field_migrate'?: (_udpa_annotations_FieldMigrateAnnotation); - '.envoy.annotations.disallowed_by_default'?: (boolean); } export interface FieldOptions__Output { - 'ctype': (keyof typeof _google_protobuf_FieldOptions_CType); + 'ctype': (_google_protobuf_FieldOptions_CType__Output); 'packed': (boolean); 'deprecated': (boolean); 'lazy': (boolean); - 'jstype': (keyof typeof _google_protobuf_FieldOptions_JSType); + 'jstype': (_google_protobuf_FieldOptions_JSType__Output); 'weak': (boolean); 'uninterpretedOption': (_google_protobuf_UninterpretedOption__Output)[]; - '.validate.rules'?: 
(_validate_FieldRules__Output); - '.udpa.annotations.sensitive': (boolean); - '.udpa.annotations.field_migrate'?: (_udpa_annotations_FieldMigrateAnnotation__Output); - '.envoy.annotations.disallowed_by_default': (boolean); } diff --git a/packages/grpc-js-xds/src/generated/google/protobuf/FileDescriptorProto.ts b/packages/grpc-js-xds/src/generated/google/protobuf/FileDescriptorProto.ts index 2954e4208..b723da7c0 100644 --- a/packages/grpc-js-xds/src/generated/google/protobuf/FileDescriptorProto.ts +++ b/packages/grpc-js-xds/src/generated/google/protobuf/FileDescriptorProto.ts @@ -15,8 +15,8 @@ export interface FileDescriptorProto { 'enumType'?: (_google_protobuf_EnumDescriptorProto)[]; 'service'?: (_google_protobuf_ServiceDescriptorProto)[]; 'extension'?: (_google_protobuf_FieldDescriptorProto)[]; - 'options'?: (_google_protobuf_FileOptions); - 'sourceCodeInfo'?: (_google_protobuf_SourceCodeInfo); + 'options'?: (_google_protobuf_FileOptions | null); + 'sourceCodeInfo'?: (_google_protobuf_SourceCodeInfo | null); 'publicDependency'?: (number)[]; 'weakDependency'?: (number)[]; 'syntax'?: (string); @@ -30,8 +30,8 @@ export interface FileDescriptorProto__Output { 'enumType': (_google_protobuf_EnumDescriptorProto__Output)[]; 'service': (_google_protobuf_ServiceDescriptorProto__Output)[]; 'extension': (_google_protobuf_FieldDescriptorProto__Output)[]; - 'options'?: (_google_protobuf_FileOptions__Output); - 'sourceCodeInfo'?: (_google_protobuf_SourceCodeInfo__Output); + 'options': (_google_protobuf_FileOptions__Output | null); + 'sourceCodeInfo': (_google_protobuf_SourceCodeInfo__Output | null); 'publicDependency': (number)[]; 'weakDependency': (number)[]; 'syntax': (string); diff --git a/packages/grpc-js-xds/src/generated/google/protobuf/FileOptions.ts b/packages/grpc-js-xds/src/generated/google/protobuf/FileOptions.ts index b2ddbb374..6fab1a84b 100644 --- a/packages/grpc-js-xds/src/generated/google/protobuf/FileOptions.ts +++ b/packages/grpc-js-xds/src/generated/google/protobuf/FileOptions.ts @@ -1,26 +1,38 @@ // Original file: null import type { UninterpretedOption as _google_protobuf_UninterpretedOption, UninterpretedOption__Output as _google_protobuf_UninterpretedOption__Output } from '../../google/protobuf/UninterpretedOption'; -import type { FileMigrateAnnotation as _udpa_annotations_FileMigrateAnnotation, FileMigrateAnnotation__Output as _udpa_annotations_FileMigrateAnnotation__Output } from '../../udpa/annotations/FileMigrateAnnotation'; import type { StatusAnnotation as _udpa_annotations_StatusAnnotation, StatusAnnotation__Output as _udpa_annotations_StatusAnnotation__Output } from '../../udpa/annotations/StatusAnnotation'; // Original file: null -export enum _google_protobuf_FileOptions_OptimizeMode { - SPEED = 1, - CODE_SIZE = 2, - LITE_RUNTIME = 3, -} +export const _google_protobuf_FileOptions_OptimizeMode = { + SPEED: 'SPEED', + CODE_SIZE: 'CODE_SIZE', + LITE_RUNTIME: 'LITE_RUNTIME', +} as const; + +export type _google_protobuf_FileOptions_OptimizeMode = + | 'SPEED' + | 1 + | 'CODE_SIZE' + | 2 + | 'LITE_RUNTIME' + | 3 + +export type _google_protobuf_FileOptions_OptimizeMode__Output = typeof _google_protobuf_FileOptions_OptimizeMode[keyof typeof _google_protobuf_FileOptions_OptimizeMode] export interface FileOptions { 'javaPackage'?: (string); 'javaOuterClassname'?: (string); - 'optimizeFor'?: (_google_protobuf_FileOptions_OptimizeMode | keyof typeof _google_protobuf_FileOptions_OptimizeMode); + 'optimizeFor'?: (_google_protobuf_FileOptions_OptimizeMode); 'javaMultipleFiles'?: 
(boolean); 'goPackage'?: (string); 'ccGenericServices'?: (boolean); 'javaGenericServices'?: (boolean); 'pyGenericServices'?: (boolean); + /** + * @deprecated + */ 'javaGenerateEqualsAndHash'?: (boolean); 'deprecated'?: (boolean); 'javaStringCheckUtf8'?: (boolean); @@ -28,19 +40,21 @@ export interface FileOptions { 'objcClassPrefix'?: (string); 'csharpNamespace'?: (string); 'uninterpretedOption'?: (_google_protobuf_UninterpretedOption)[]; - '.udpa.annotations.file_migrate'?: (_udpa_annotations_FileMigrateAnnotation); - '.udpa.annotations.file_status'?: (_udpa_annotations_StatusAnnotation); + '.udpa.annotations.file_status'?: (_udpa_annotations_StatusAnnotation | null); } export interface FileOptions__Output { 'javaPackage': (string); 'javaOuterClassname': (string); - 'optimizeFor': (keyof typeof _google_protobuf_FileOptions_OptimizeMode); + 'optimizeFor': (_google_protobuf_FileOptions_OptimizeMode__Output); 'javaMultipleFiles': (boolean); 'goPackage': (string); 'ccGenericServices': (boolean); 'javaGenericServices': (boolean); 'pyGenericServices': (boolean); + /** + * @deprecated + */ 'javaGenerateEqualsAndHash': (boolean); 'deprecated': (boolean); 'javaStringCheckUtf8': (boolean); @@ -48,6 +62,5 @@ export interface FileOptions__Output { 'objcClassPrefix': (string); 'csharpNamespace': (string); 'uninterpretedOption': (_google_protobuf_UninterpretedOption__Output)[]; - '.udpa.annotations.file_migrate'?: (_udpa_annotations_FileMigrateAnnotation__Output); - '.udpa.annotations.file_status'?: (_udpa_annotations_StatusAnnotation__Output); + '.udpa.annotations.file_status': (_udpa_annotations_StatusAnnotation__Output | null); } diff --git a/packages/grpc-js-xds/src/generated/google/protobuf/FloatValue.ts b/packages/grpc-js-xds/src/generated/google/protobuf/FloatValue.ts index 144a9a585..54a655fbb 100644 --- a/packages/grpc-js-xds/src/generated/google/protobuf/FloatValue.ts +++ b/packages/grpc-js-xds/src/generated/google/protobuf/FloatValue.ts @@ -6,5 +6,5 @@ export interface FloatValue { } export interface FloatValue__Output { - 'value': (number | string); + 'value': (number); } diff --git a/packages/grpc-js-xds/src/generated/google/protobuf/MessageOptions.ts b/packages/grpc-js-xds/src/generated/google/protobuf/MessageOptions.ts index 7560daa28..31f669eb0 100644 --- a/packages/grpc-js-xds/src/generated/google/protobuf/MessageOptions.ts +++ b/packages/grpc-js-xds/src/generated/google/protobuf/MessageOptions.ts @@ -1,7 +1,6 @@ // Original file: null import type { UninterpretedOption as _google_protobuf_UninterpretedOption, UninterpretedOption__Output as _google_protobuf_UninterpretedOption__Output } from '../../google/protobuf/UninterpretedOption'; -import type { MigrateAnnotation as _udpa_annotations_MigrateAnnotation, MigrateAnnotation__Output as _udpa_annotations_MigrateAnnotation__Output } from '../../udpa/annotations/MigrateAnnotation'; export interface MessageOptions { 'messageSetWireFormat'?: (boolean); @@ -9,8 +8,6 @@ export interface MessageOptions { 'deprecated'?: (boolean); 'mapEntry'?: (boolean); 'uninterpretedOption'?: (_google_protobuf_UninterpretedOption)[]; - '.validate.disabled'?: (boolean); - '.udpa.annotations.message_migrate'?: (_udpa_annotations_MigrateAnnotation); } export interface MessageOptions__Output { @@ -19,6 +16,4 @@ export interface MessageOptions__Output { 'deprecated': (boolean); 'mapEntry': (boolean); 'uninterpretedOption': (_google_protobuf_UninterpretedOption__Output)[]; - '.validate.disabled': (boolean); - '.udpa.annotations.message_migrate'?: 
(_udpa_annotations_MigrateAnnotation__Output); } diff --git a/packages/grpc-js-xds/src/generated/google/protobuf/MethodDescriptorProto.ts b/packages/grpc-js-xds/src/generated/google/protobuf/MethodDescriptorProto.ts index bc2f0afb5..c76c0ea23 100644 --- a/packages/grpc-js-xds/src/generated/google/protobuf/MethodDescriptorProto.ts +++ b/packages/grpc-js-xds/src/generated/google/protobuf/MethodDescriptorProto.ts @@ -6,7 +6,7 @@ export interface MethodDescriptorProto { 'name'?: (string); 'inputType'?: (string); 'outputType'?: (string); - 'options'?: (_google_protobuf_MethodOptions); + 'options'?: (_google_protobuf_MethodOptions | null); 'clientStreaming'?: (boolean); 'serverStreaming'?: (boolean); } @@ -15,7 +15,7 @@ export interface MethodDescriptorProto__Output { 'name': (string); 'inputType': (string); 'outputType': (string); - 'options'?: (_google_protobuf_MethodOptions__Output); + 'options': (_google_protobuf_MethodOptions__Output | null); 'clientStreaming': (boolean); 'serverStreaming': (boolean); } diff --git a/packages/grpc-js-xds/src/generated/google/protobuf/NullValue.ts b/packages/grpc-js-xds/src/generated/google/protobuf/NullValue.ts index 377aab885..c66dacc7b 100644 --- a/packages/grpc-js-xds/src/generated/google/protobuf/NullValue.ts +++ b/packages/grpc-js-xds/src/generated/google/protobuf/NullValue.ts @@ -1,5 +1,11 @@ // Original file: null -export enum NullValue { - NULL_VALUE = 0, -} +export const NullValue = { + NULL_VALUE: 'NULL_VALUE', +} as const; + +export type NullValue = + | 'NULL_VALUE' + | 0 + +export type NullValue__Output = typeof NullValue[keyof typeof NullValue] diff --git a/packages/grpc-js-xds/src/generated/google/protobuf/OneofDescriptorProto.ts b/packages/grpc-js-xds/src/generated/google/protobuf/OneofDescriptorProto.ts index c10ccecd3..636f13ed4 100644 --- a/packages/grpc-js-xds/src/generated/google/protobuf/OneofDescriptorProto.ts +++ b/packages/grpc-js-xds/src/generated/google/protobuf/OneofDescriptorProto.ts @@ -4,10 +4,10 @@ import type { OneofOptions as _google_protobuf_OneofOptions, OneofOptions__Outpu export interface OneofDescriptorProto { 'name'?: (string); - 'options'?: (_google_protobuf_OneofOptions); + 'options'?: (_google_protobuf_OneofOptions | null); } export interface OneofDescriptorProto__Output { 'name': (string); - 'options'?: (_google_protobuf_OneofOptions__Output); + 'options': (_google_protobuf_OneofOptions__Output | null); } diff --git a/packages/grpc-js-xds/src/generated/google/protobuf/OneofOptions.ts b/packages/grpc-js-xds/src/generated/google/protobuf/OneofOptions.ts index b54ecb0b1..d81d34797 100644 --- a/packages/grpc-js-xds/src/generated/google/protobuf/OneofOptions.ts +++ b/packages/grpc-js-xds/src/generated/google/protobuf/OneofOptions.ts @@ -4,10 +4,8 @@ import type { UninterpretedOption as _google_protobuf_UninterpretedOption, Unint export interface OneofOptions { 'uninterpretedOption'?: (_google_protobuf_UninterpretedOption)[]; - '.validate.required'?: (boolean); } export interface OneofOptions__Output { 'uninterpretedOption': (_google_protobuf_UninterpretedOption__Output)[]; - '.validate.required': (boolean); } diff --git a/packages/grpc-js-xds/src/generated/google/protobuf/ServiceDescriptorProto.ts b/packages/grpc-js-xds/src/generated/google/protobuf/ServiceDescriptorProto.ts index 695a8775c..40c9263ea 100644 --- a/packages/grpc-js-xds/src/generated/google/protobuf/ServiceDescriptorProto.ts +++ b/packages/grpc-js-xds/src/generated/google/protobuf/ServiceDescriptorProto.ts @@ -6,11 +6,11 @@ import type { ServiceOptions as 
_google_protobuf_ServiceOptions, ServiceOptions_ export interface ServiceDescriptorProto { 'name'?: (string); 'method'?: (_google_protobuf_MethodDescriptorProto)[]; - 'options'?: (_google_protobuf_ServiceOptions); + 'options'?: (_google_protobuf_ServiceOptions | null); } export interface ServiceDescriptorProto__Output { 'name': (string); 'method': (_google_protobuf_MethodDescriptorProto__Output)[]; - 'options'?: (_google_protobuf_ServiceOptions__Output); + 'options': (_google_protobuf_ServiceOptions__Output | null); } diff --git a/packages/grpc-js-xds/src/generated/google/protobuf/Struct.ts b/packages/grpc-js-xds/src/generated/google/protobuf/Struct.ts index 9919350e4..41b79eab3 100644 --- a/packages/grpc-js-xds/src/generated/google/protobuf/Struct.ts +++ b/packages/grpc-js-xds/src/generated/google/protobuf/Struct.ts @@ -7,5 +7,5 @@ export interface Struct { } export interface Struct__Output { - 'fields'?: ({[key: string]: _google_protobuf_Value__Output}); + 'fields': ({[key: string]: _google_protobuf_Value__Output}); } diff --git a/packages/grpc-js-xds/src/generated/google/protobuf/UninterpretedOption.ts b/packages/grpc-js-xds/src/generated/google/protobuf/UninterpretedOption.ts index 433820f55..6e9fc275b 100644 --- a/packages/grpc-js-xds/src/generated/google/protobuf/UninterpretedOption.ts +++ b/packages/grpc-js-xds/src/generated/google/protobuf/UninterpretedOption.ts @@ -27,7 +27,7 @@ export interface UninterpretedOption__Output { 'identifierValue': (string); 'positiveIntValue': (string); 'negativeIntValue': (string); - 'doubleValue': (number | string); + 'doubleValue': (number); 'stringValue': (Buffer); 'aggregateValue': (string); } diff --git a/packages/grpc-js-xds/src/generated/google/protobuf/Value.ts b/packages/grpc-js-xds/src/generated/google/protobuf/Value.ts index 68b665dcb..67cc03fff 100644 --- a/packages/grpc-js-xds/src/generated/google/protobuf/Value.ts +++ b/packages/grpc-js-xds/src/generated/google/protobuf/Value.ts @@ -1,25 +1,25 @@ // Original file: null -import type { NullValue as _google_protobuf_NullValue } from '../../google/protobuf/NullValue'; +import type { NullValue as _google_protobuf_NullValue, NullValue__Output as _google_protobuf_NullValue__Output } from '../../google/protobuf/NullValue'; import type { Struct as _google_protobuf_Struct, Struct__Output as _google_protobuf_Struct__Output } from '../../google/protobuf/Struct'; import type { ListValue as _google_protobuf_ListValue, ListValue__Output as _google_protobuf_ListValue__Output } from '../../google/protobuf/ListValue'; export interface Value { - 'nullValue'?: (_google_protobuf_NullValue | keyof typeof _google_protobuf_NullValue); + 'nullValue'?: (_google_protobuf_NullValue); 'numberValue'?: (number | string); 'stringValue'?: (string); 'boolValue'?: (boolean); - 'structValue'?: (_google_protobuf_Struct); - 'listValue'?: (_google_protobuf_ListValue); + 'structValue'?: (_google_protobuf_Struct | null); + 'listValue'?: (_google_protobuf_ListValue | null); 'kind'?: "nullValue"|"numberValue"|"stringValue"|"boolValue"|"structValue"|"listValue"; } export interface Value__Output { - 'nullValue'?: (keyof typeof _google_protobuf_NullValue); - 'numberValue'?: (number | string); + 'nullValue'?: (_google_protobuf_NullValue__Output); + 'numberValue'?: (number); 'stringValue'?: (string); 'boolValue'?: (boolean); - 'structValue'?: (_google_protobuf_Struct__Output); - 'listValue'?: (_google_protobuf_ListValue__Output); + 'structValue'?: (_google_protobuf_Struct__Output | null); + 'listValue'?: 
(_google_protobuf_ListValue__Output | null); 'kind': "nullValue"|"numberValue"|"stringValue"|"boolValue"|"structValue"|"listValue"; } diff --git a/packages/grpc-js-xds/src/generated/http_connection_manager.ts b/packages/grpc-js-xds/src/generated/http_connection_manager.ts index 7e822564d..e0e06f904 100644 --- a/packages/grpc-js-xds/src/generated/http_connection_manager.ts +++ b/packages/grpc-js-xds/src/generated/http_connection_manager.ts @@ -1,5 +1,5 @@ import type * as grpc from '@grpc/grpc-js'; -import type { ServiceDefinition, EnumTypeDefinition, MessageTypeDefinition } from '@grpc/proto-loader'; +import type { EnumTypeDefinition, MessageTypeDefinition } from '@grpc/proto-loader'; type SubtypeConstructor any, Subtype> = { @@ -10,14 +10,32 @@ export interface ProtoGrpcType { envoy: { annotations: { } - api: { - v2: { - RouteConfiguration: MessageTypeDefinition - ScopedRouteConfiguration: MessageTypeDefinition - Vhds: MessageTypeDefinition - core: { + config: { + accesslog: { + v3: { + AccessLog: MessageTypeDefinition + AccessLogFilter: MessageTypeDefinition + AndFilter: MessageTypeDefinition + ComparisonFilter: MessageTypeDefinition + DurationFilter: MessageTypeDefinition + ExtensionFilter: MessageTypeDefinition + GrpcStatusFilter: MessageTypeDefinition + HeaderFilter: MessageTypeDefinition + LogTypeFilter: MessageTypeDefinition + MetadataFilter: MessageTypeDefinition + NotHealthCheckFilter: MessageTypeDefinition + OrFilter: MessageTypeDefinition + ResponseFlagFilter: MessageTypeDefinition + RuntimeFilter: MessageTypeDefinition + StatusCodeFilter: MessageTypeDefinition + TraceableFilter: MessageTypeDefinition + } + } + core: { + v3: { Address: MessageTypeDefinition AggregatedConfigSource: MessageTypeDefinition + AlternateProtocolsCacheOptions: MessageTypeDefinition ApiConfigSource: MessageTypeDefinition ApiVersion: EnumTypeDefinition AsyncDataSource: MessageTypeDefinition @@ -28,7 +46,10 @@ export interface ProtoGrpcType { ConfigSource: MessageTypeDefinition ControlPlane: MessageTypeDefinition DataSource: MessageTypeDefinition + EnvoyInternalAddress: MessageTypeDefinition Extension: MessageTypeDefinition + ExtensionConfigSource: MessageTypeDefinition + ExtraSourceAddress: MessageTypeDefinition GrpcProtocolOptions: MessageTypeDefinition GrpcService: MessageTypeDefinition HeaderMap: MessageTypeDefinition @@ -36,12 +57,20 @@ export interface ProtoGrpcType { HeaderValueOption: MessageTypeDefinition Http1ProtocolOptions: MessageTypeDefinition Http2ProtocolOptions: MessageTypeDefinition + Http3ProtocolOptions: MessageTypeDefinition HttpProtocolOptions: MessageTypeDefinition HttpUri: MessageTypeDefinition + KeepaliveSettings: MessageTypeDefinition Locality: MessageTypeDefinition Metadata: MessageTypeDefinition Node: MessageTypeDefinition + PathConfigSource: MessageTypeDefinition Pipe: MessageTypeDefinition + ProxyProtocolConfig: MessageTypeDefinition + ProxyProtocolPassThroughTLVs: MessageTypeDefinition + QueryParameter: MessageTypeDefinition + QuicKeepAliveSettings: MessageTypeDefinition + QuicProtocolOptions: MessageTypeDefinition RateLimitSettings: MessageTypeDefinition RemoteDataSource: MessageTypeDefinition RequestMethod: EnumTypeDefinition @@ -50,64 +79,85 @@ export interface ProtoGrpcType { RuntimeDouble: MessageTypeDefinition RuntimeFeatureFlag: MessageTypeDefinition RuntimeFractionalPercent: MessageTypeDefinition + RuntimePercent: MessageTypeDefinition RuntimeUInt32: MessageTypeDefinition + SchemeHeaderTransformation: MessageTypeDefinition SelfConfigSource: MessageTypeDefinition 
SocketAddress: MessageTypeDefinition SocketOption: MessageTypeDefinition + SocketOptionsOverride: MessageTypeDefinition + SubstitutionFormatString: MessageTypeDefinition TcpKeepalive: MessageTypeDefinition TcpProtocolOptions: MessageTypeDefinition TrafficDirection: EnumTypeDefinition TransportSocket: MessageTypeDefinition + TypedExtensionConfig: MessageTypeDefinition UpstreamHttpProtocolOptions: MessageTypeDefinition + WatchedDirectory: MessageTypeDefinition } - route: { + } + route: { + v3: { + ClusterSpecifierPlugin: MessageTypeDefinition CorsPolicy: MessageTypeDefinition Decorator: MessageTypeDefinition DirectResponseAction: MessageTypeDefinition FilterAction: MessageTypeDefinition + FilterConfig: MessageTypeDefinition HeaderMatcher: MessageTypeDefinition HedgePolicy: MessageTypeDefinition + InternalRedirectPolicy: MessageTypeDefinition + NonForwardingAction: MessageTypeDefinition QueryParameterMatcher: MessageTypeDefinition RateLimit: MessageTypeDefinition RedirectAction: MessageTypeDefinition RetryPolicy: MessageTypeDefinition Route: MessageTypeDefinition RouteAction: MessageTypeDefinition + RouteConfiguration: MessageTypeDefinition + RouteList: MessageTypeDefinition RouteMatch: MessageTypeDefinition + ScopedRouteConfiguration: MessageTypeDefinition Tracing: MessageTypeDefinition + Vhds: MessageTypeDefinition VirtualCluster: MessageTypeDefinition VirtualHost: MessageTypeDefinition WeightedCluster: MessageTypeDefinition } } + trace: { + v3: { + Tracing: MessageTypeDefinition + } + } } - config: { - filter: { - accesslog: { - v2: { - AccessLog: MessageTypeDefinition - AccessLogFilter: MessageTypeDefinition - AndFilter: MessageTypeDefinition - ComparisonFilter: MessageTypeDefinition - DurationFilter: MessageTypeDefinition - ExtensionFilter: MessageTypeDefinition - GrpcStatusFilter: MessageTypeDefinition - HeaderFilter: MessageTypeDefinition - NotHealthCheckFilter: MessageTypeDefinition - OrFilter: MessageTypeDefinition - ResponseFlagFilter: MessageTypeDefinition - RuntimeFilter: MessageTypeDefinition - StatusCodeFilter: MessageTypeDefinition - TraceableFilter: MessageTypeDefinition - } + data: { + accesslog: { + v3: { + AccessLogCommon: MessageTypeDefinition + AccessLogType: EnumTypeDefinition + ConnectionProperties: MessageTypeDefinition + HTTPAccessLogEntry: MessageTypeDefinition + HTTPRequestProperties: MessageTypeDefinition + HTTPResponseProperties: MessageTypeDefinition + ResponseFlags: MessageTypeDefinition + TCPAccessLogEntry: MessageTypeDefinition + TLSProperties: MessageTypeDefinition } + } + } + extensions: { + filters: { network: { http_connection_manager: { - v2: { + v3: { + EnvoyMobileHttpConnectionManager: MessageTypeDefinition HttpConnectionManager: MessageTypeDefinition HttpFilter: MessageTypeDefinition + LocalReplyConfig: MessageTypeDefinition Rds: MessageTypeDefinition RequestIDExtension: MessageTypeDefinition + ResponseMapper: MessageTypeDefinition ScopedRds: MessageTypeDefinition ScopedRouteConfigurationsList: MessageTypeDefinition ScopedRoutes: MessageTypeDefinition @@ -115,36 +165,44 @@ export interface ProtoGrpcType { } } } - trace: { - v2: { - Tracing: MessageTypeDefinition - } - } } type: { - DoubleRange: MessageTypeDefinition - FractionalPercent: MessageTypeDefinition - Int32Range: MessageTypeDefinition - Int64Range: MessageTypeDefinition - Percent: MessageTypeDefinition - SemanticVersion: MessageTypeDefinition + http: { + v3: { + PathTransformation: MessageTypeDefinition + } + } matcher: { - ListStringMatcher: MessageTypeDefinition - RegexMatchAndSubstitute: 
MessageTypeDefinition - RegexMatcher: MessageTypeDefinition - StringMatcher: MessageTypeDefinition + v3: { + DoubleMatcher: MessageTypeDefinition + ListMatcher: MessageTypeDefinition + ListStringMatcher: MessageTypeDefinition + MetadataMatcher: MessageTypeDefinition + RegexMatchAndSubstitute: MessageTypeDefinition + RegexMatcher: MessageTypeDefinition + StringMatcher: MessageTypeDefinition + ValueMatcher: MessageTypeDefinition + } } metadata: { - v2: { + v3: { MetadataKey: MessageTypeDefinition MetadataKind: MessageTypeDefinition } } tracing: { - v2: { + v3: { CustomTag: MessageTypeDefinition } } + v3: { + DoubleRange: MessageTypeDefinition + FractionalPercent: MessageTypeDefinition + Int32Range: MessageTypeDefinition + Int64Range: MessageTypeDefinition + Percent: MessageTypeDefinition + SemanticVersion: MessageTypeDefinition + } } } google: { @@ -191,10 +249,12 @@ export interface ProtoGrpcType { udpa: { annotations: { FieldMigrateAnnotation: MessageTypeDefinition + FieldSecurityAnnotation: MessageTypeDefinition FileMigrateAnnotation: MessageTypeDefinition MigrateAnnotation: MessageTypeDefinition PackageVersionStatus: EnumTypeDefinition StatusAnnotation: MessageTypeDefinition + VersioningAnnotation: MessageTypeDefinition } } validate: { @@ -223,5 +283,33 @@ export interface ProtoGrpcType { UInt32Rules: MessageTypeDefinition UInt64Rules: MessageTypeDefinition } + xds: { + annotations: { + v3: { + FieldStatusAnnotation: MessageTypeDefinition + FileStatusAnnotation: MessageTypeDefinition + MessageStatusAnnotation: MessageTypeDefinition + PackageVersionStatus: EnumTypeDefinition + StatusAnnotation: MessageTypeDefinition + } + } + core: { + v3: { + Authority: MessageTypeDefinition + ContextParams: MessageTypeDefinition + TypedExtensionConfig: MessageTypeDefinition + } + } + type: { + matcher: { + v3: { + ListStringMatcher: MessageTypeDefinition + Matcher: MessageTypeDefinition + RegexMatcher: MessageTypeDefinition + StringMatcher: MessageTypeDefinition + } + } + } + } } diff --git a/packages/grpc-js-xds/src/generated/listener.ts b/packages/grpc-js-xds/src/generated/listener.ts index ab67cd096..4ffc4712d 100644 --- a/packages/grpc-js-xds/src/generated/listener.ts +++ b/packages/grpc-js-xds/src/generated/listener.ts @@ -1,5 +1,5 @@ import type * as grpc from '@grpc/grpc-js'; -import type { ServiceDefinition, EnumTypeDefinition, MessageTypeDefinition } from '@grpc/proto-loader'; +import type { EnumTypeDefinition, MessageTypeDefinition } from '@grpc/proto-loader'; type SubtypeConstructor any, Subtype> = { @@ -10,25 +10,32 @@ export interface ProtoGrpcType { envoy: { annotations: { } - api: { - v2: { - Listener: MessageTypeDefinition - auth: { - CertificateValidationContext: MessageTypeDefinition - CommonTlsContext: MessageTypeDefinition - DownstreamTlsContext: MessageTypeDefinition - GenericSecret: MessageTypeDefinition - PrivateKeyProvider: MessageTypeDefinition - SdsSecretConfig: MessageTypeDefinition - Secret: MessageTypeDefinition - TlsCertificate: MessageTypeDefinition - TlsParameters: MessageTypeDefinition - TlsSessionTicketKeys: MessageTypeDefinition - UpstreamTlsContext: MessageTypeDefinition + config: { + accesslog: { + v3: { + AccessLog: MessageTypeDefinition + AccessLogFilter: MessageTypeDefinition + AndFilter: MessageTypeDefinition + ComparisonFilter: MessageTypeDefinition + DurationFilter: MessageTypeDefinition + ExtensionFilter: MessageTypeDefinition + GrpcStatusFilter: MessageTypeDefinition + HeaderFilter: MessageTypeDefinition + LogTypeFilter: MessageTypeDefinition + 
MetadataFilter: MessageTypeDefinition + NotHealthCheckFilter: MessageTypeDefinition + OrFilter: MessageTypeDefinition + ResponseFlagFilter: MessageTypeDefinition + RuntimeFilter: MessageTypeDefinition + StatusCodeFilter: MessageTypeDefinition + TraceableFilter: MessageTypeDefinition } - core: { + } + core: { + v3: { Address: MessageTypeDefinition AggregatedConfigSource: MessageTypeDefinition + AlternateProtocolsCacheOptions: MessageTypeDefinition ApiConfigSource: MessageTypeDefinition ApiVersion: EnumTypeDefinition AsyncDataSource: MessageTypeDefinition @@ -39,16 +46,31 @@ export interface ProtoGrpcType { ConfigSource: MessageTypeDefinition ControlPlane: MessageTypeDefinition DataSource: MessageTypeDefinition + EnvoyInternalAddress: MessageTypeDefinition Extension: MessageTypeDefinition + ExtensionConfigSource: MessageTypeDefinition + ExtraSourceAddress: MessageTypeDefinition + GrpcProtocolOptions: MessageTypeDefinition GrpcService: MessageTypeDefinition HeaderMap: MessageTypeDefinition HeaderValue: MessageTypeDefinition HeaderValueOption: MessageTypeDefinition + Http1ProtocolOptions: MessageTypeDefinition + Http2ProtocolOptions: MessageTypeDefinition + Http3ProtocolOptions: MessageTypeDefinition + HttpProtocolOptions: MessageTypeDefinition HttpUri: MessageTypeDefinition + KeepaliveSettings: MessageTypeDefinition Locality: MessageTypeDefinition Metadata: MessageTypeDefinition Node: MessageTypeDefinition + PathConfigSource: MessageTypeDefinition Pipe: MessageTypeDefinition + ProxyProtocolConfig: MessageTypeDefinition + ProxyProtocolPassThroughTLVs: MessageTypeDefinition + QueryParameter: MessageTypeDefinition + QuicKeepAliveSettings: MessageTypeDefinition + QuicProtocolOptions: MessageTypeDefinition RateLimitSettings: MessageTypeDefinition RemoteDataSource: MessageTypeDefinition RequestMethod: EnumTypeDefinition @@ -57,36 +79,61 @@ export interface ProtoGrpcType { RuntimeDouble: MessageTypeDefinition RuntimeFeatureFlag: MessageTypeDefinition RuntimeFractionalPercent: MessageTypeDefinition + RuntimePercent: MessageTypeDefinition RuntimeUInt32: MessageTypeDefinition + SchemeHeaderTransformation: MessageTypeDefinition SelfConfigSource: MessageTypeDefinition SocketAddress: MessageTypeDefinition SocketOption: MessageTypeDefinition + SocketOptionsOverride: MessageTypeDefinition TcpKeepalive: MessageTypeDefinition + TcpProtocolOptions: MessageTypeDefinition TrafficDirection: EnumTypeDefinition TransportSocket: MessageTypeDefinition + TypedExtensionConfig: MessageTypeDefinition + UdpSocketConfig: MessageTypeDefinition + UpstreamHttpProtocolOptions: MessageTypeDefinition + WatchedDirectory: MessageTypeDefinition } - listener: { + } + listener: { + v3: { ActiveRawUdpListenerConfig: MessageTypeDefinition + AdditionalAddress: MessageTypeDefinition + ApiListener: MessageTypeDefinition + ApiListenerManager: MessageTypeDefinition Filter: MessageTypeDefinition FilterChain: MessageTypeDefinition FilterChainMatch: MessageTypeDefinition + Listener: MessageTypeDefinition + ListenerCollection: MessageTypeDefinition ListenerFilter: MessageTypeDefinition ListenerFilterChainMatchPredicate: MessageTypeDefinition + ListenerManager: MessageTypeDefinition + QuicProtocolOptions: MessageTypeDefinition UdpListenerConfig: MessageTypeDefinition + ValidationListenerManager: MessageTypeDefinition } - route: { + } + route: { + v3: { + ClusterSpecifierPlugin: MessageTypeDefinition CorsPolicy: MessageTypeDefinition Decorator: MessageTypeDefinition DirectResponseAction: MessageTypeDefinition FilterAction: MessageTypeDefinition + 
FilterConfig: MessageTypeDefinition HeaderMatcher: MessageTypeDefinition HedgePolicy: MessageTypeDefinition + InternalRedirectPolicy: MessageTypeDefinition + NonForwardingAction: MessageTypeDefinition QueryParameterMatcher: MessageTypeDefinition RateLimit: MessageTypeDefinition RedirectAction: MessageTypeDefinition RetryPolicy: MessageTypeDefinition Route: MessageTypeDefinition RouteAction: MessageTypeDefinition + RouteList: MessageTypeDefinition RouteMatch: MessageTypeDefinition Tracing: MessageTypeDefinition VirtualCluster: MessageTypeDefinition @@ -95,65 +142,56 @@ export interface ProtoGrpcType { } } } - config: { - filter: { - accesslog: { - v2: { - AccessLog: MessageTypeDefinition - AccessLogFilter: MessageTypeDefinition - AndFilter: MessageTypeDefinition - ComparisonFilter: MessageTypeDefinition - DurationFilter: MessageTypeDefinition - ExtensionFilter: MessageTypeDefinition - GrpcStatusFilter: MessageTypeDefinition - HeaderFilter: MessageTypeDefinition - NotHealthCheckFilter: MessageTypeDefinition - OrFilter: MessageTypeDefinition - ResponseFlagFilter: MessageTypeDefinition - RuntimeFilter: MessageTypeDefinition - StatusCodeFilter: MessageTypeDefinition - TraceableFilter: MessageTypeDefinition - } - } - } - listener: { - v2: { - ApiListener: MessageTypeDefinition + data: { + accesslog: { + v3: { + AccessLogCommon: MessageTypeDefinition + AccessLogType: EnumTypeDefinition + ConnectionProperties: MessageTypeDefinition + HTTPAccessLogEntry: MessageTypeDefinition + HTTPRequestProperties: MessageTypeDefinition + HTTPResponseProperties: MessageTypeDefinition + ResponseFlags: MessageTypeDefinition + TCPAccessLogEntry: MessageTypeDefinition + TLSProperties: MessageTypeDefinition } } } type: { - DoubleRange: MessageTypeDefinition - FractionalPercent: MessageTypeDefinition - Int32Range: MessageTypeDefinition - Int64Range: MessageTypeDefinition - Percent: MessageTypeDefinition - SemanticVersion: MessageTypeDefinition matcher: { - ListStringMatcher: MessageTypeDefinition - RegexMatchAndSubstitute: MessageTypeDefinition - RegexMatcher: MessageTypeDefinition - StringMatcher: MessageTypeDefinition + v3: { + DoubleMatcher: MessageTypeDefinition + ListMatcher: MessageTypeDefinition + ListStringMatcher: MessageTypeDefinition + MetadataMatcher: MessageTypeDefinition + RegexMatchAndSubstitute: MessageTypeDefinition + RegexMatcher: MessageTypeDefinition + StringMatcher: MessageTypeDefinition + ValueMatcher: MessageTypeDefinition + } } metadata: { - v2: { + v3: { MetadataKey: MessageTypeDefinition MetadataKind: MessageTypeDefinition } } tracing: { - v2: { + v3: { CustomTag: MessageTypeDefinition } } + v3: { + DoubleRange: MessageTypeDefinition + FractionalPercent: MessageTypeDefinition + Int32Range: MessageTypeDefinition + Int64Range: MessageTypeDefinition + Percent: MessageTypeDefinition + SemanticVersion: MessageTypeDefinition + } } } google: { - api: { - CustomHttpPattern: MessageTypeDefinition - Http: MessageTypeDefinition - HttpRule: MessageTypeDefinition - } protobuf: { Any: MessageTypeDefinition BoolValue: MessageTypeDefinition @@ -197,10 +235,12 @@ export interface ProtoGrpcType { udpa: { annotations: { FieldMigrateAnnotation: MessageTypeDefinition + FieldSecurityAnnotation: MessageTypeDefinition FileMigrateAnnotation: MessageTypeDefinition MigrateAnnotation: MessageTypeDefinition PackageVersionStatus: EnumTypeDefinition StatusAnnotation: MessageTypeDefinition + VersioningAnnotation: MessageTypeDefinition } } validate: { @@ -229,5 +269,35 @@ export interface ProtoGrpcType { UInt32Rules: 
MessageTypeDefinition UInt64Rules: MessageTypeDefinition } + xds: { + annotations: { + v3: { + FieldStatusAnnotation: MessageTypeDefinition + FileStatusAnnotation: MessageTypeDefinition + MessageStatusAnnotation: MessageTypeDefinition + PackageVersionStatus: EnumTypeDefinition + StatusAnnotation: MessageTypeDefinition + } + } + core: { + v3: { + Authority: MessageTypeDefinition + CollectionEntry: MessageTypeDefinition + ContextParams: MessageTypeDefinition + ResourceLocator: MessageTypeDefinition + TypedExtensionConfig: MessageTypeDefinition + } + } + type: { + matcher: { + v3: { + ListStringMatcher: MessageTypeDefinition + Matcher: MessageTypeDefinition + RegexMatcher: MessageTypeDefinition + StringMatcher: MessageTypeDefinition + } + } + } + } } diff --git a/packages/grpc-js-xds/src/generated/lrs.ts b/packages/grpc-js-xds/src/generated/lrs.ts index f3f180807..d49f1123c 100644 --- a/packages/grpc-js-xds/src/generated/lrs.ts +++ b/packages/grpc-js-xds/src/generated/lrs.ts @@ -1,7 +1,7 @@ import type * as grpc from '@grpc/grpc-js'; -import type { ServiceDefinition, EnumTypeDefinition, MessageTypeDefinition } from '@grpc/proto-loader'; +import type { EnumTypeDefinition, MessageTypeDefinition } from '@grpc/proto-loader'; -import type { LoadReportingServiceClient as _envoy_service_load_stats_v2_LoadReportingServiceClient } from './envoy/service/load_stats/v2/LoadReportingService'; +import type { LoadReportingServiceClient as _envoy_service_load_stats_v3_LoadReportingServiceClient, LoadReportingServiceDefinition as _envoy_service_load_stats_v3_LoadReportingServiceDefinition } from './envoy/service/load_stats/v3/LoadReportingService'; type SubtypeConstructor any, Subtype> = { new(...args: ConstructorParameters): Subtype; @@ -9,9 +9,11 @@ type SubtypeConstructor any, Subtype> export interface ProtoGrpcType { envoy: { - api: { - v2: { - core: { + annotations: { + } + config: { + core: { + v3: { Address: MessageTypeDefinition AsyncDataSource: MessageTypeDefinition BackoffStrategy: MessageTypeDefinition @@ -20,7 +22,9 @@ export interface ProtoGrpcType { CidrRange: MessageTypeDefinition ControlPlane: MessageTypeDefinition DataSource: MessageTypeDefinition + EnvoyInternalAddress: MessageTypeDefinition Extension: MessageTypeDefinition + ExtraSourceAddress: MessageTypeDefinition HeaderMap: MessageTypeDefinition HeaderValue: MessageTypeDefinition HeaderValueOption: MessageTypeDefinition @@ -29,6 +33,7 @@ export interface ProtoGrpcType { Metadata: MessageTypeDefinition Node: MessageTypeDefinition Pipe: MessageTypeDefinition + QueryParameter: MessageTypeDefinition RemoteDataSource: MessageTypeDefinition RequestMethod: EnumTypeDefinition RetryPolicy: MessageTypeDefinition @@ -36,14 +41,19 @@ export interface ProtoGrpcType { RuntimeDouble: MessageTypeDefinition RuntimeFeatureFlag: MessageTypeDefinition RuntimeFractionalPercent: MessageTypeDefinition + RuntimePercent: MessageTypeDefinition RuntimeUInt32: MessageTypeDefinition SocketAddress: MessageTypeDefinition SocketOption: MessageTypeDefinition + SocketOptionsOverride: MessageTypeDefinition TcpKeepalive: MessageTypeDefinition TrafficDirection: EnumTypeDefinition TransportSocket: MessageTypeDefinition + WatchedDirectory: MessageTypeDefinition } - endpoint: { + } + endpoint: { + v3: { ClusterStats: MessageTypeDefinition EndpointLoadMetricStats: MessageTypeDefinition UpstreamEndpointStats: MessageTypeDefinition @@ -53,17 +63,19 @@ export interface ProtoGrpcType { } service: { load_stats: { - v2: { - LoadReportingService: SubtypeConstructor & { service: 
ServiceDefinition } + v3: { + LoadReportingService: SubtypeConstructor & { service: _envoy_service_load_stats_v3_LoadReportingServiceDefinition } LoadStatsRequest: MessageTypeDefinition LoadStatsResponse: MessageTypeDefinition } } } type: { - FractionalPercent: MessageTypeDefinition - Percent: MessageTypeDefinition - SemanticVersion: MessageTypeDefinition + v3: { + FractionalPercent: MessageTypeDefinition + Percent: MessageTypeDefinition + SemanticVersion: MessageTypeDefinition + } } } google: { @@ -113,6 +125,7 @@ export interface ProtoGrpcType { MigrateAnnotation: MessageTypeDefinition PackageVersionStatus: EnumTypeDefinition StatusAnnotation: MessageTypeDefinition + VersioningAnnotation: MessageTypeDefinition } } validate: { @@ -141,5 +154,21 @@ export interface ProtoGrpcType { UInt32Rules: MessageTypeDefinition UInt64Rules: MessageTypeDefinition } + xds: { + annotations: { + v3: { + FieldStatusAnnotation: MessageTypeDefinition + FileStatusAnnotation: MessageTypeDefinition + MessageStatusAnnotation: MessageTypeDefinition + PackageVersionStatus: EnumTypeDefinition + StatusAnnotation: MessageTypeDefinition + } + } + core: { + v3: { + ContextParams: MessageTypeDefinition + } + } + } } diff --git a/packages/grpc-js-xds/src/generated/pick_first.ts b/packages/grpc-js-xds/src/generated/pick_first.ts new file mode 100644 index 000000000..9bf20e03f --- /dev/null +++ b/packages/grpc-js-xds/src/generated/pick_first.ts @@ -0,0 +1,52 @@ +import type * as grpc from '@grpc/grpc-js'; +import type { EnumTypeDefinition, MessageTypeDefinition } from '@grpc/proto-loader'; + + +type SubtypeConstructor any, Subtype> = { + new(...args: ConstructorParameters): Subtype; +}; + +export interface ProtoGrpcType { + envoy: { + extensions: { + load_balancing_policies: { + pick_first: { + v3: { + PickFirst: MessageTypeDefinition + } + } + } + } + } + google: { + protobuf: { + DescriptorProto: MessageTypeDefinition + EnumDescriptorProto: MessageTypeDefinition + EnumOptions: MessageTypeDefinition + EnumValueDescriptorProto: MessageTypeDefinition + EnumValueOptions: MessageTypeDefinition + FieldDescriptorProto: MessageTypeDefinition + FieldOptions: MessageTypeDefinition + FileDescriptorProto: MessageTypeDefinition + FileDescriptorSet: MessageTypeDefinition + FileOptions: MessageTypeDefinition + GeneratedCodeInfo: MessageTypeDefinition + MessageOptions: MessageTypeDefinition + MethodDescriptorProto: MessageTypeDefinition + MethodOptions: MessageTypeDefinition + OneofDescriptorProto: MessageTypeDefinition + OneofOptions: MessageTypeDefinition + ServiceDescriptorProto: MessageTypeDefinition + ServiceOptions: MessageTypeDefinition + SourceCodeInfo: MessageTypeDefinition + UninterpretedOption: MessageTypeDefinition + } + } + udpa: { + annotations: { + PackageVersionStatus: EnumTypeDefinition + StatusAnnotation: MessageTypeDefinition + } + } +} + diff --git a/packages/grpc-js-xds/src/generated/ring_hash.ts b/packages/grpc-js-xds/src/generated/ring_hash.ts new file mode 100644 index 000000000..d298067df --- /dev/null +++ b/packages/grpc-js-xds/src/generated/ring_hash.ts @@ -0,0 +1,172 @@ +import type * as grpc from '@grpc/grpc-js'; +import type { EnumTypeDefinition, MessageTypeDefinition } from '@grpc/proto-loader'; + + +type SubtypeConstructor any, Subtype> = { + new(...args: ConstructorParameters): Subtype; +}; + +export interface ProtoGrpcType { + envoy: { + annotations: { + } + config: { + core: { + v3: { + Address: MessageTypeDefinition + AsyncDataSource: MessageTypeDefinition + BackoffStrategy: MessageTypeDefinition + 
BindConfig: MessageTypeDefinition + BuildVersion: MessageTypeDefinition + CidrRange: MessageTypeDefinition + ControlPlane: MessageTypeDefinition + DataSource: MessageTypeDefinition + EnvoyInternalAddress: MessageTypeDefinition + Extension: MessageTypeDefinition + ExtraSourceAddress: MessageTypeDefinition + HeaderMap: MessageTypeDefinition + HeaderValue: MessageTypeDefinition + HeaderValueOption: MessageTypeDefinition + HttpUri: MessageTypeDefinition + Locality: MessageTypeDefinition + Metadata: MessageTypeDefinition + Node: MessageTypeDefinition + Pipe: MessageTypeDefinition + QueryParameter: MessageTypeDefinition + RemoteDataSource: MessageTypeDefinition + RequestMethod: EnumTypeDefinition + RetryPolicy: MessageTypeDefinition + RoutingPriority: EnumTypeDefinition + RuntimeDouble: MessageTypeDefinition + RuntimeFeatureFlag: MessageTypeDefinition + RuntimeFractionalPercent: MessageTypeDefinition + RuntimePercent: MessageTypeDefinition + RuntimeUInt32: MessageTypeDefinition + SocketAddress: MessageTypeDefinition + SocketOption: MessageTypeDefinition + SocketOptionsOverride: MessageTypeDefinition + TcpKeepalive: MessageTypeDefinition + TrafficDirection: EnumTypeDefinition + TransportSocket: MessageTypeDefinition + WatchedDirectory: MessageTypeDefinition + } + } + } + extensions: { + load_balancing_policies: { + common: { + v3: { + ConsistentHashingLbConfig: MessageTypeDefinition + LocalityLbConfig: MessageTypeDefinition + SlowStartConfig: MessageTypeDefinition + } + } + ring_hash: { + v3: { + RingHash: MessageTypeDefinition + } + } + } + } + type: { + v3: { + FractionalPercent: MessageTypeDefinition + Percent: MessageTypeDefinition + SemanticVersion: MessageTypeDefinition + } + } + } + google: { + protobuf: { + Any: MessageTypeDefinition + BoolValue: MessageTypeDefinition + BytesValue: MessageTypeDefinition + DescriptorProto: MessageTypeDefinition + DoubleValue: MessageTypeDefinition + Duration: MessageTypeDefinition + EnumDescriptorProto: MessageTypeDefinition + EnumOptions: MessageTypeDefinition + EnumValueDescriptorProto: MessageTypeDefinition + EnumValueOptions: MessageTypeDefinition + FieldDescriptorProto: MessageTypeDefinition + FieldOptions: MessageTypeDefinition + FileDescriptorProto: MessageTypeDefinition + FileDescriptorSet: MessageTypeDefinition + FileOptions: MessageTypeDefinition + FloatValue: MessageTypeDefinition + GeneratedCodeInfo: MessageTypeDefinition + Int32Value: MessageTypeDefinition + Int64Value: MessageTypeDefinition + ListValue: MessageTypeDefinition + MessageOptions: MessageTypeDefinition + MethodDescriptorProto: MessageTypeDefinition + MethodOptions: MessageTypeDefinition + NullValue: EnumTypeDefinition + OneofDescriptorProto: MessageTypeDefinition + OneofOptions: MessageTypeDefinition + ServiceDescriptorProto: MessageTypeDefinition + ServiceOptions: MessageTypeDefinition + SourceCodeInfo: MessageTypeDefinition + StringValue: MessageTypeDefinition + Struct: MessageTypeDefinition + Timestamp: MessageTypeDefinition + UInt32Value: MessageTypeDefinition + UInt64Value: MessageTypeDefinition + UninterpretedOption: MessageTypeDefinition + Value: MessageTypeDefinition + } + } + udpa: { + annotations: { + FieldMigrateAnnotation: MessageTypeDefinition + FileMigrateAnnotation: MessageTypeDefinition + MigrateAnnotation: MessageTypeDefinition + PackageVersionStatus: EnumTypeDefinition + StatusAnnotation: MessageTypeDefinition + VersioningAnnotation: MessageTypeDefinition + } + } + validate: { + AnyRules: MessageTypeDefinition + BoolRules: MessageTypeDefinition + BytesRules: 
MessageTypeDefinition + DoubleRules: MessageTypeDefinition + DurationRules: MessageTypeDefinition + EnumRules: MessageTypeDefinition + FieldRules: MessageTypeDefinition + Fixed32Rules: MessageTypeDefinition + Fixed64Rules: MessageTypeDefinition + FloatRules: MessageTypeDefinition + Int32Rules: MessageTypeDefinition + Int64Rules: MessageTypeDefinition + KnownRegex: EnumTypeDefinition + MapRules: MessageTypeDefinition + MessageRules: MessageTypeDefinition + RepeatedRules: MessageTypeDefinition + SFixed32Rules: MessageTypeDefinition + SFixed64Rules: MessageTypeDefinition + SInt32Rules: MessageTypeDefinition + SInt64Rules: MessageTypeDefinition + StringRules: MessageTypeDefinition + TimestampRules: MessageTypeDefinition + UInt32Rules: MessageTypeDefinition + UInt64Rules: MessageTypeDefinition + } + xds: { + annotations: { + v3: { + FieldStatusAnnotation: MessageTypeDefinition + FileStatusAnnotation: MessageTypeDefinition + MessageStatusAnnotation: MessageTypeDefinition + PackageVersionStatus: EnumTypeDefinition + StatusAnnotation: MessageTypeDefinition + } + } + core: { + v3: { + ContextParams: MessageTypeDefinition + } + } + } +} + diff --git a/packages/grpc-js-xds/src/generated/route.ts b/packages/grpc-js-xds/src/generated/route.ts index 05332fe37..25552e612 100644 --- a/packages/grpc-js-xds/src/generated/route.ts +++ b/packages/grpc-js-xds/src/generated/route.ts @@ -1,5 +1,5 @@ import type * as grpc from '@grpc/grpc-js'; -import type { ServiceDefinition, EnumTypeDefinition, MessageTypeDefinition } from '@grpc/proto-loader'; +import type { EnumTypeDefinition, MessageTypeDefinition } from '@grpc/proto-loader'; type SubtypeConstructor any, Subtype> = { @@ -10,11 +10,9 @@ export interface ProtoGrpcType { envoy: { annotations: { } - api: { - v2: { - RouteConfiguration: MessageTypeDefinition - Vhds: MessageTypeDefinition - core: { + config: { + core: { + v3: { Address: MessageTypeDefinition AggregatedConfigSource: MessageTypeDefinition ApiConfigSource: MessageTypeDefinition @@ -27,7 +25,10 @@ export interface ProtoGrpcType { ConfigSource: MessageTypeDefinition ControlPlane: MessageTypeDefinition DataSource: MessageTypeDefinition + EnvoyInternalAddress: MessageTypeDefinition Extension: MessageTypeDefinition + ExtensionConfigSource: MessageTypeDefinition + ExtraSourceAddress: MessageTypeDefinition GrpcService: MessageTypeDefinition HeaderMap: MessageTypeDefinition HeaderValue: MessageTypeDefinition @@ -36,7 +37,11 @@ export interface ProtoGrpcType { Locality: MessageTypeDefinition Metadata: MessageTypeDefinition Node: MessageTypeDefinition + PathConfigSource: MessageTypeDefinition Pipe: MessageTypeDefinition + ProxyProtocolConfig: MessageTypeDefinition + ProxyProtocolPassThroughTLVs: MessageTypeDefinition + QueryParameter: MessageTypeDefinition RateLimitSettings: MessageTypeDefinition RemoteDataSource: MessageTypeDefinition RequestMethod: EnumTypeDefinition @@ -45,29 +50,42 @@ export interface ProtoGrpcType { RuntimeDouble: MessageTypeDefinition RuntimeFeatureFlag: MessageTypeDefinition RuntimeFractionalPercent: MessageTypeDefinition + RuntimePercent: MessageTypeDefinition RuntimeUInt32: MessageTypeDefinition SelfConfigSource: MessageTypeDefinition SocketAddress: MessageTypeDefinition SocketOption: MessageTypeDefinition + SocketOptionsOverride: MessageTypeDefinition TcpKeepalive: MessageTypeDefinition TrafficDirection: EnumTypeDefinition TransportSocket: MessageTypeDefinition + TypedExtensionConfig: MessageTypeDefinition + WatchedDirectory: MessageTypeDefinition } - route: { + } + route: { + v3: { 
+ ClusterSpecifierPlugin: MessageTypeDefinition CorsPolicy: MessageTypeDefinition Decorator: MessageTypeDefinition DirectResponseAction: MessageTypeDefinition FilterAction: MessageTypeDefinition + FilterConfig: MessageTypeDefinition HeaderMatcher: MessageTypeDefinition HedgePolicy: MessageTypeDefinition + InternalRedirectPolicy: MessageTypeDefinition + NonForwardingAction: MessageTypeDefinition QueryParameterMatcher: MessageTypeDefinition RateLimit: MessageTypeDefinition RedirectAction: MessageTypeDefinition RetryPolicy: MessageTypeDefinition Route: MessageTypeDefinition RouteAction: MessageTypeDefinition + RouteConfiguration: MessageTypeDefinition + RouteList: MessageTypeDefinition RouteMatch: MessageTypeDefinition Tracing: MessageTypeDefinition + Vhds: MessageTypeDefinition VirtualCluster: MessageTypeDefinition VirtualHost: MessageTypeDefinition WeightedCluster: MessageTypeDefinition @@ -75,29 +93,37 @@ export interface ProtoGrpcType { } } type: { - DoubleRange: MessageTypeDefinition - FractionalPercent: MessageTypeDefinition - Int32Range: MessageTypeDefinition - Int64Range: MessageTypeDefinition - Percent: MessageTypeDefinition - SemanticVersion: MessageTypeDefinition matcher: { - ListStringMatcher: MessageTypeDefinition - RegexMatchAndSubstitute: MessageTypeDefinition - RegexMatcher: MessageTypeDefinition - StringMatcher: MessageTypeDefinition + v3: { + DoubleMatcher: MessageTypeDefinition + ListMatcher: MessageTypeDefinition + ListStringMatcher: MessageTypeDefinition + MetadataMatcher: MessageTypeDefinition + RegexMatchAndSubstitute: MessageTypeDefinition + RegexMatcher: MessageTypeDefinition + StringMatcher: MessageTypeDefinition + ValueMatcher: MessageTypeDefinition + } } metadata: { - v2: { + v3: { MetadataKey: MessageTypeDefinition MetadataKind: MessageTypeDefinition } } tracing: { - v2: { + v3: { CustomTag: MessageTypeDefinition } } + v3: { + DoubleRange: MessageTypeDefinition + FractionalPercent: MessageTypeDefinition + Int32Range: MessageTypeDefinition + Int64Range: MessageTypeDefinition + Percent: MessageTypeDefinition + SemanticVersion: MessageTypeDefinition + } } } google: { @@ -148,6 +174,7 @@ export interface ProtoGrpcType { MigrateAnnotation: MessageTypeDefinition PackageVersionStatus: EnumTypeDefinition StatusAnnotation: MessageTypeDefinition + VersioningAnnotation: MessageTypeDefinition } } validate: { @@ -176,5 +203,33 @@ export interface ProtoGrpcType { UInt32Rules: MessageTypeDefinition UInt64Rules: MessageTypeDefinition } + xds: { + annotations: { + v3: { + FieldStatusAnnotation: MessageTypeDefinition + FileStatusAnnotation: MessageTypeDefinition + MessageStatusAnnotation: MessageTypeDefinition + PackageVersionStatus: EnumTypeDefinition + StatusAnnotation: MessageTypeDefinition + } + } + core: { + v3: { + Authority: MessageTypeDefinition + ContextParams: MessageTypeDefinition + TypedExtensionConfig: MessageTypeDefinition + } + } + type: { + matcher: { + v3: { + ListStringMatcher: MessageTypeDefinition + Matcher: MessageTypeDefinition + RegexMatcher: MessageTypeDefinition + StringMatcher: MessageTypeDefinition + } + } + } + } } diff --git a/packages/grpc-js-xds/src/generated/typed_struct.ts b/packages/grpc-js-xds/src/generated/typed_struct.ts new file mode 100644 index 000000000..e8dca13d5 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/typed_struct.ts @@ -0,0 +1,81 @@ +import type * as grpc from '@grpc/grpc-js'; +import type { EnumTypeDefinition, MessageTypeDefinition } from '@grpc/proto-loader'; + + +type SubtypeConstructor any, Subtype> = { + new(...args: 
ConstructorParameters): Subtype; +}; + +export interface ProtoGrpcType { + google: { + protobuf: { + DescriptorProto: MessageTypeDefinition + Duration: MessageTypeDefinition + EnumDescriptorProto: MessageTypeDefinition + EnumOptions: MessageTypeDefinition + EnumValueDescriptorProto: MessageTypeDefinition + EnumValueOptions: MessageTypeDefinition + FieldDescriptorProto: MessageTypeDefinition + FieldOptions: MessageTypeDefinition + FileDescriptorProto: MessageTypeDefinition + FileDescriptorSet: MessageTypeDefinition + FileOptions: MessageTypeDefinition + GeneratedCodeInfo: MessageTypeDefinition + ListValue: MessageTypeDefinition + MessageOptions: MessageTypeDefinition + MethodDescriptorProto: MessageTypeDefinition + MethodOptions: MessageTypeDefinition + NullValue: EnumTypeDefinition + OneofDescriptorProto: MessageTypeDefinition + OneofOptions: MessageTypeDefinition + ServiceDescriptorProto: MessageTypeDefinition + ServiceOptions: MessageTypeDefinition + SourceCodeInfo: MessageTypeDefinition + Struct: MessageTypeDefinition + Timestamp: MessageTypeDefinition + UninterpretedOption: MessageTypeDefinition + Value: MessageTypeDefinition + } + } + udpa: { + type: { + v1: { + TypedStruct: MessageTypeDefinition + } + } + } + validate: { + AnyRules: MessageTypeDefinition + BoolRules: MessageTypeDefinition + BytesRules: MessageTypeDefinition + DoubleRules: MessageTypeDefinition + DurationRules: MessageTypeDefinition + EnumRules: MessageTypeDefinition + FieldRules: MessageTypeDefinition + Fixed32Rules: MessageTypeDefinition + Fixed64Rules: MessageTypeDefinition + FloatRules: MessageTypeDefinition + Int32Rules: MessageTypeDefinition + Int64Rules: MessageTypeDefinition + KnownRegex: EnumTypeDefinition + MapRules: MessageTypeDefinition + MessageRules: MessageTypeDefinition + RepeatedRules: MessageTypeDefinition + SFixed32Rules: MessageTypeDefinition + SFixed64Rules: MessageTypeDefinition + SInt32Rules: MessageTypeDefinition + SInt64Rules: MessageTypeDefinition + StringRules: MessageTypeDefinition + TimestampRules: MessageTypeDefinition + UInt32Rules: MessageTypeDefinition + UInt64Rules: MessageTypeDefinition + } + xds: { + type: { + v3: { + TypedStruct: MessageTypeDefinition + } + } + } +} + diff --git a/packages/grpc-js-xds/src/generated/udpa/annotations/FieldMigrateAnnotation.ts b/packages/grpc-js-xds/src/generated/udpa/annotations/FieldMigrateAnnotation.ts index 1ad015b25..4cbe9fdcb 100644 --- a/packages/grpc-js-xds/src/generated/udpa/annotations/FieldMigrateAnnotation.ts +++ b/packages/grpc-js-xds/src/generated/udpa/annotations/FieldMigrateAnnotation.ts @@ -1,4 +1,4 @@ -// Original file: deps/udpa/udpa/annotations/migrate.proto +// Original file: deps/xds/udpa/annotations/migrate.proto export interface FieldMigrateAnnotation { diff --git a/packages/grpc-js-xds/src/generated/udpa/annotations/FieldSecurityAnnotation.ts b/packages/grpc-js-xds/src/generated/udpa/annotations/FieldSecurityAnnotation.ts new file mode 100644 index 000000000..9c25fb84e --- /dev/null +++ b/packages/grpc-js-xds/src/generated/udpa/annotations/FieldSecurityAnnotation.ts @@ -0,0 +1,32 @@ +// Original file: deps/xds/udpa/annotations/security.proto + + +/** + * These annotations indicate metadata for the purpose of understanding the + * security significance of fields. + */ +export interface FieldSecurityAnnotation { + /** + * Field should be set in the presence of untrusted downstreams. + */ + 'configure_for_untrusted_downstream'?: (boolean); + /** + * Field should be set in the presence of untrusted upstreams. 
+ */ + 'configure_for_untrusted_upstream'?: (boolean); +} + +/** + * These annotations indicate metadata for the purpose of understanding the + * security significance of fields. + */ +export interface FieldSecurityAnnotation__Output { + /** + * Field should be set in the presence of untrusted downstreams. + */ + 'configure_for_untrusted_downstream': (boolean); + /** + * Field should be set in the presence of untrusted upstreams. + */ + 'configure_for_untrusted_upstream': (boolean); +} diff --git a/packages/grpc-js-xds/src/generated/udpa/annotations/FileMigrateAnnotation.ts b/packages/grpc-js-xds/src/generated/udpa/annotations/FileMigrateAnnotation.ts index b7ef7c21d..95d29245d 100644 --- a/packages/grpc-js-xds/src/generated/udpa/annotations/FileMigrateAnnotation.ts +++ b/packages/grpc-js-xds/src/generated/udpa/annotations/FileMigrateAnnotation.ts @@ -1,4 +1,4 @@ -// Original file: deps/udpa/udpa/annotations/migrate.proto +// Original file: deps/xds/udpa/annotations/migrate.proto export interface FileMigrateAnnotation { diff --git a/packages/grpc-js-xds/src/generated/udpa/annotations/MigrateAnnotation.ts b/packages/grpc-js-xds/src/generated/udpa/annotations/MigrateAnnotation.ts index e3fdcaa99..16def9f28 100644 --- a/packages/grpc-js-xds/src/generated/udpa/annotations/MigrateAnnotation.ts +++ b/packages/grpc-js-xds/src/generated/udpa/annotations/MigrateAnnotation.ts @@ -1,4 +1,4 @@ -// Original file: deps/udpa/udpa/annotations/migrate.proto +// Original file: deps/xds/udpa/annotations/migrate.proto export interface MigrateAnnotation { diff --git a/packages/grpc-js-xds/src/generated/udpa/annotations/PackageVersionStatus.ts b/packages/grpc-js-xds/src/generated/udpa/annotations/PackageVersionStatus.ts index c60c3f984..4d15df739 100644 --- a/packages/grpc-js-xds/src/generated/udpa/annotations/PackageVersionStatus.ts +++ b/packages/grpc-js-xds/src/generated/udpa/annotations/PackageVersionStatus.ts @@ -1,21 +1,46 @@ -// Original file: deps/udpa/udpa/annotations/status.proto +// Original file: deps/xds/udpa/annotations/status.proto -export enum PackageVersionStatus { +export const PackageVersionStatus = { /** * Unknown package version status. */ - UNKNOWN = 0, + UNKNOWN: 'UNKNOWN', /** * This version of the package is frozen. */ - FROZEN = 1, + FROZEN: 'FROZEN', /** * This version of the package is the active development version. */ - ACTIVE = 2, + ACTIVE: 'ACTIVE', /** * This version of the package is the candidate for the next major version. It * is typically machine generated from the active development version. */ - NEXT_MAJOR_VERSION_CANDIDATE = 3, -} + NEXT_MAJOR_VERSION_CANDIDATE: 'NEXT_MAJOR_VERSION_CANDIDATE', +} as const; + +export type PackageVersionStatus = + /** + * Unknown package version status. + */ + | 'UNKNOWN' + | 0 + /** + * This version of the package is frozen. + */ + | 'FROZEN' + | 1 + /** + * This version of the package is the active development version. + */ + | 'ACTIVE' + | 2 + /** + * This version of the package is the candidate for the next major version. It + * is typically machine generated from the active development version. 
+ */ + | 'NEXT_MAJOR_VERSION_CANDIDATE' + | 3 + +export type PackageVersionStatus__Output = typeof PackageVersionStatus[keyof typeof PackageVersionStatus] diff --git a/packages/grpc-js-xds/src/generated/udpa/annotations/StatusAnnotation.ts b/packages/grpc-js-xds/src/generated/udpa/annotations/StatusAnnotation.ts index 7b33ce9c8..f129c3c94 100644 --- a/packages/grpc-js-xds/src/generated/udpa/annotations/StatusAnnotation.ts +++ b/packages/grpc-js-xds/src/generated/udpa/annotations/StatusAnnotation.ts @@ -1,6 +1,6 @@ -// Original file: deps/udpa/udpa/annotations/status.proto +// Original file: deps/xds/udpa/annotations/status.proto -import type { PackageVersionStatus as _udpa_annotations_PackageVersionStatus } from '../../udpa/annotations/PackageVersionStatus'; +import type { PackageVersionStatus as _udpa_annotations_PackageVersionStatus, PackageVersionStatus__Output as _udpa_annotations_PackageVersionStatus__Output } from '../../udpa/annotations/PackageVersionStatus'; export interface StatusAnnotation { /** @@ -10,7 +10,7 @@ export interface StatusAnnotation { /** * The entity belongs to a package with the given version status. */ - 'package_version_status'?: (_udpa_annotations_PackageVersionStatus | keyof typeof _udpa_annotations_PackageVersionStatus); + 'package_version_status'?: (_udpa_annotations_PackageVersionStatus); } export interface StatusAnnotation__Output { @@ -21,5 +21,5 @@ export interface StatusAnnotation__Output { /** * The entity belongs to a package with the given version status. */ - 'package_version_status': (keyof typeof _udpa_annotations_PackageVersionStatus); + 'package_version_status': (_udpa_annotations_PackageVersionStatus__Output); } diff --git a/packages/grpc-js-xds/src/generated/udpa/annotations/VersioningAnnotation.ts b/packages/grpc-js-xds/src/generated/udpa/annotations/VersioningAnnotation.ts new file mode 100644 index 000000000..7a517a06c --- /dev/null +++ b/packages/grpc-js-xds/src/generated/udpa/annotations/VersioningAnnotation.ts @@ -0,0 +1,20 @@ +// Original file: deps/xds/udpa/annotations/versioning.proto + + +export interface VersioningAnnotation { + /** + * Track the previous message type. E.g. this message might be + * udpa.foo.v3alpha.Foo and it was previously udpa.bar.v2.Bar. This + * information is consumed by UDPA via proto descriptors. + */ + 'previous_message_type'?: (string); +} + +export interface VersioningAnnotation__Output { + /** + * Track the previous message type. E.g. this message might be + * udpa.foo.v3alpha.Foo and it was previously udpa.bar.v2.Bar. This + * information is consumed by UDPA via proto descriptors. + */ + 'previous_message_type': (string); +} diff --git a/packages/grpc-js-xds/src/generated/udpa/type/v1/TypedStruct.ts b/packages/grpc-js-xds/src/generated/udpa/type/v1/TypedStruct.ts new file mode 100644 index 000000000..b080c6348 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/udpa/type/v1/TypedStruct.ts @@ -0,0 +1,77 @@ +// Original file: deps/xds/udpa/type/v1/typed_struct.proto + +import type { Struct as _google_protobuf_Struct, Struct__Output as _google_protobuf_Struct__Output } from '../../../google/protobuf/Struct'; + +/** + * A TypedStruct contains an arbitrary JSON serialized protocol buffer message with a URL that + * describes the type of the serialized message. This is very similar to google.protobuf.Any, + * instead of having protocol buffer binary, this employs google.protobuf.Struct as value. 
+ * + * This message is intended to be embedded inside Any, so it shouldn't be directly referred + * from other UDPA messages. + * + * When packing an opaque extension config, packing the expected type into Any is preferred + * wherever possible for its efficiency. TypedStruct should be used only if a proto descriptor + * is not available, for example if: + * - A control plane sends opaque message that is originally from external source in human readable + * format such as JSON or YAML. + * - The control plane doesn't have the knowledge of the protocol buffer schema hence it cannot + * serialize the message in protocol buffer binary format. + * - The DPLB doesn't have have the knowledge of the protocol buffer schema its plugin or extension + * uses. This has to be indicated in the DPLB capability negotiation. + * + * When a DPLB receives a TypedStruct in Any, it should: + * - Check if the type_url of the TypedStruct matches the type the extension expects. + * - Convert value to the type described in type_url and perform validation. + * TODO(lizan): Figure out how TypeStruct should be used with DPLB extensions that doesn't link + * protobuf descriptor with DPLB itself, (e.g. gRPC LB Plugin, Envoy WASM extensions). + */ +export interface TypedStruct { + /** + * A URL that uniquely identifies the type of the serialize protocol buffer message. + * This has same semantics and format described in google.protobuf.Any: + * https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/any.proto + */ + 'type_url'?: (string); + /** + * A JSON representation of the above specified type. + */ + 'value'?: (_google_protobuf_Struct | null); +} + +/** + * A TypedStruct contains an arbitrary JSON serialized protocol buffer message with a URL that + * describes the type of the serialized message. This is very similar to google.protobuf.Any, + * instead of having protocol buffer binary, this employs google.protobuf.Struct as value. + * + * This message is intended to be embedded inside Any, so it shouldn't be directly referred + * from other UDPA messages. + * + * When packing an opaque extension config, packing the expected type into Any is preferred + * wherever possible for its efficiency. TypedStruct should be used only if a proto descriptor + * is not available, for example if: + * - A control plane sends opaque message that is originally from external source in human readable + * format such as JSON or YAML. + * - The control plane doesn't have the knowledge of the protocol buffer schema hence it cannot + * serialize the message in protocol buffer binary format. + * - The DPLB doesn't have have the knowledge of the protocol buffer schema its plugin or extension + * uses. This has to be indicated in the DPLB capability negotiation. + * + * When a DPLB receives a TypedStruct in Any, it should: + * - Check if the type_url of the TypedStruct matches the type the extension expects. + * - Convert value to the type described in type_url and perform validation. + * TODO(lizan): Figure out how TypeStruct should be used with DPLB extensions that doesn't link + * protobuf descriptor with DPLB itself, (e.g. gRPC LB Plugin, Envoy WASM extensions). + */ +export interface TypedStruct__Output { + /** + * A URL that uniquely identifies the type of the serialize protocol buffer message. 
+ * This has same semantics and format described in google.protobuf.Any: + * https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/any.proto + */ + 'type_url': (string); + /** + * A JSON representation of the above specified type. + */ + 'value': (_google_protobuf_Struct__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/validate/DoubleRules.ts b/packages/grpc-js-xds/src/generated/validate/DoubleRules.ts index fead5072a..e756c86b9 100644 --- a/packages/grpc-js-xds/src/generated/validate/DoubleRules.ts +++ b/packages/grpc-js-xds/src/generated/validate/DoubleRules.ts @@ -50,37 +50,37 @@ export interface DoubleRules__Output { /** * Const specifies that this field must be exactly the specified value */ - 'const': (number | string); + 'const': (number); /** * Lt specifies that this field must be less than the specified value, * exclusive */ - 'lt': (number | string); + 'lt': (number); /** * Lte specifies that this field must be less than or equal to the * specified value, inclusive */ - 'lte': (number | string); + 'lte': (number); /** * Gt specifies that this field must be greater than the specified value, * exclusive. If the value of Gt is larger than a specified Lt or Lte, the * range is reversed. */ - 'gt': (number | string); + 'gt': (number); /** * Gte specifies that this field must be greater than or equal to the * specified value, inclusive. If the value of Gte is larger than a * specified Lt or Lte, the range is reversed. */ - 'gte': (number | string); + 'gte': (number); /** * In specifies that this field must be equal to one of the specified * values */ - 'in': (number | string)[]; + 'in': (number)[]; /** * NotIn specifies that this field cannot be equal to one of the specified * values */ - 'not_in': (number | string)[]; + 'not_in': (number)[]; } diff --git a/packages/grpc-js-xds/src/generated/validate/DurationRules.ts b/packages/grpc-js-xds/src/generated/validate/DurationRules.ts index 86b80a34d..879928e20 100644 --- a/packages/grpc-js-xds/src/generated/validate/DurationRules.ts +++ b/packages/grpc-js-xds/src/generated/validate/DurationRules.ts @@ -14,27 +14,27 @@ export interface DurationRules { /** * Const specifies that this field must be exactly the specified value */ - 'const'?: (_google_protobuf_Duration); + 'const'?: (_google_protobuf_Duration | null); /** * Lt specifies that this field must be less than the specified value, * exclusive */ - 'lt'?: (_google_protobuf_Duration); + 'lt'?: (_google_protobuf_Duration | null); /** * Lt specifies that this field must be less than the specified value, * inclusive */ - 'lte'?: (_google_protobuf_Duration); + 'lte'?: (_google_protobuf_Duration | null); /** * Gt specifies that this field must be greater than the specified value, * exclusive */ - 'gt'?: (_google_protobuf_Duration); + 'gt'?: (_google_protobuf_Duration | null); /** * Gte specifies that this field must be greater than the specified value, * inclusive */ - 'gte'?: (_google_protobuf_Duration); + 'gte'?: (_google_protobuf_Duration | null); /** * In specifies that this field must be equal to one of the specified * values @@ -59,27 +59,27 @@ export interface DurationRules__Output { /** * Const specifies that this field must be exactly the specified value */ - 'const'?: (_google_protobuf_Duration__Output); + 'const': (_google_protobuf_Duration__Output | null); /** * Lt specifies that this field must be less than the specified value, * exclusive */ - 'lt'?: (_google_protobuf_Duration__Output); + 'lt': (_google_protobuf_Duration__Output | null); 
/** * Lt specifies that this field must be less than the specified value, * inclusive */ - 'lte'?: (_google_protobuf_Duration__Output); + 'lte': (_google_protobuf_Duration__Output | null); /** * Gt specifies that this field must be greater than the specified value, * exclusive */ - 'gt'?: (_google_protobuf_Duration__Output); + 'gt': (_google_protobuf_Duration__Output | null); /** * Gte specifies that this field must be greater than the specified value, * inclusive */ - 'gte'?: (_google_protobuf_Duration__Output); + 'gte': (_google_protobuf_Duration__Output | null); /** * In specifies that this field must be equal to one of the specified * values diff --git a/packages/grpc-js-xds/src/generated/validate/FieldRules.ts b/packages/grpc-js-xds/src/generated/validate/FieldRules.ts index dae124bc1..ce6f313e7 100644 --- a/packages/grpc-js-xds/src/generated/validate/FieldRules.ts +++ b/packages/grpc-js-xds/src/generated/validate/FieldRules.ts @@ -22,7 +22,6 @@ import type { MapRules as _validate_MapRules, MapRules__Output as _validate_MapR import type { AnyRules as _validate_AnyRules, AnyRules__Output as _validate_AnyRules__Output } from '../validate/AnyRules'; import type { DurationRules as _validate_DurationRules, DurationRules__Output as _validate_DurationRules__Output } from '../validate/DurationRules'; import type { TimestampRules as _validate_TimestampRules, TimestampRules__Output as _validate_TimestampRules__Output } from '../validate/TimestampRules'; -import type { Long } from '@grpc/proto-loader'; /** * FieldRules encapsulates the rules for each type of field. Depending on the @@ -32,34 +31,34 @@ export interface FieldRules { /** * Scalar Field Types */ - 'float'?: (_validate_FloatRules); - 'double'?: (_validate_DoubleRules); - 'int32'?: (_validate_Int32Rules); - 'int64'?: (_validate_Int64Rules); - 'uint32'?: (_validate_UInt32Rules); - 'uint64'?: (_validate_UInt64Rules); - 'sint32'?: (_validate_SInt32Rules); - 'sint64'?: (_validate_SInt64Rules); - 'fixed32'?: (_validate_Fixed32Rules); - 'fixed64'?: (_validate_Fixed64Rules); - 'sfixed32'?: (_validate_SFixed32Rules); - 'sfixed64'?: (_validate_SFixed64Rules); - 'bool'?: (_validate_BoolRules); - 'string'?: (_validate_StringRules); - 'bytes'?: (_validate_BytesRules); + 'float'?: (_validate_FloatRules | null); + 'double'?: (_validate_DoubleRules | null); + 'int32'?: (_validate_Int32Rules | null); + 'int64'?: (_validate_Int64Rules | null); + 'uint32'?: (_validate_UInt32Rules | null); + 'uint64'?: (_validate_UInt64Rules | null); + 'sint32'?: (_validate_SInt32Rules | null); + 'sint64'?: (_validate_SInt64Rules | null); + 'fixed32'?: (_validate_Fixed32Rules | null); + 'fixed64'?: (_validate_Fixed64Rules | null); + 'sfixed32'?: (_validate_SFixed32Rules | null); + 'sfixed64'?: (_validate_SFixed64Rules | null); + 'bool'?: (_validate_BoolRules | null); + 'string'?: (_validate_StringRules | null); + 'bytes'?: (_validate_BytesRules | null); /** * Complex Field Types */ - 'enum'?: (_validate_EnumRules); - 'message'?: (_validate_MessageRules); - 'repeated'?: (_validate_RepeatedRules); - 'map'?: (_validate_MapRules); + 'enum'?: (_validate_EnumRules | null); + 'message'?: (_validate_MessageRules | null); + 'repeated'?: (_validate_RepeatedRules | null); + 'map'?: (_validate_MapRules | null); /** * Well-Known Field Types */ - 'any'?: (_validate_AnyRules); - 'duration'?: (_validate_DurationRules); - 'timestamp'?: (_validate_TimestampRules); + 'any'?: (_validate_AnyRules | null); + 'duration'?: (_validate_DurationRules | null); + 'timestamp'?: 
(_validate_TimestampRules | null); 'type'?: "float"|"double"|"int32"|"int64"|"uint32"|"uint64"|"sint32"|"sint64"|"fixed32"|"fixed64"|"sfixed32"|"sfixed64"|"bool"|"string"|"bytes"|"enum"|"repeated"|"map"|"any"|"duration"|"timestamp"; } @@ -71,33 +70,33 @@ export interface FieldRules__Output { /** * Scalar Field Types */ - 'float'?: (_validate_FloatRules__Output); - 'double'?: (_validate_DoubleRules__Output); - 'int32'?: (_validate_Int32Rules__Output); - 'int64'?: (_validate_Int64Rules__Output); - 'uint32'?: (_validate_UInt32Rules__Output); - 'uint64'?: (_validate_UInt64Rules__Output); - 'sint32'?: (_validate_SInt32Rules__Output); - 'sint64'?: (_validate_SInt64Rules__Output); - 'fixed32'?: (_validate_Fixed32Rules__Output); - 'fixed64'?: (_validate_Fixed64Rules__Output); - 'sfixed32'?: (_validate_SFixed32Rules__Output); - 'sfixed64'?: (_validate_SFixed64Rules__Output); - 'bool'?: (_validate_BoolRules__Output); - 'string'?: (_validate_StringRules__Output); - 'bytes'?: (_validate_BytesRules__Output); + 'float'?: (_validate_FloatRules__Output | null); + 'double'?: (_validate_DoubleRules__Output | null); + 'int32'?: (_validate_Int32Rules__Output | null); + 'int64'?: (_validate_Int64Rules__Output | null); + 'uint32'?: (_validate_UInt32Rules__Output | null); + 'uint64'?: (_validate_UInt64Rules__Output | null); + 'sint32'?: (_validate_SInt32Rules__Output | null); + 'sint64'?: (_validate_SInt64Rules__Output | null); + 'fixed32'?: (_validate_Fixed32Rules__Output | null); + 'fixed64'?: (_validate_Fixed64Rules__Output | null); + 'sfixed32'?: (_validate_SFixed32Rules__Output | null); + 'sfixed64'?: (_validate_SFixed64Rules__Output | null); + 'bool'?: (_validate_BoolRules__Output | null); + 'string'?: (_validate_StringRules__Output | null); + 'bytes'?: (_validate_BytesRules__Output | null); /** * Complex Field Types */ - 'enum'?: (_validate_EnumRules__Output); - 'message'?: (_validate_MessageRules__Output); - 'repeated'?: (_validate_RepeatedRules__Output); - 'map'?: (_validate_MapRules__Output); + 'enum'?: (_validate_EnumRules__Output | null); + 'message': (_validate_MessageRules__Output | null); + 'repeated'?: (_validate_RepeatedRules__Output | null); + 'map'?: (_validate_MapRules__Output | null); /** * Well-Known Field Types */ - 'any'?: (_validate_AnyRules__Output); - 'duration'?: (_validate_DurationRules__Output); - 'timestamp'?: (_validate_TimestampRules__Output); + 'any'?: (_validate_AnyRules__Output | null); + 'duration'?: (_validate_DurationRules__Output | null); + 'timestamp'?: (_validate_TimestampRules__Output | null); 'type': "float"|"double"|"int32"|"int64"|"uint32"|"uint64"|"sint32"|"sint64"|"fixed32"|"fixed64"|"sfixed32"|"sfixed64"|"bool"|"string"|"bytes"|"enum"|"repeated"|"map"|"any"|"duration"|"timestamp"; } diff --git a/packages/grpc-js-xds/src/generated/validate/FloatRules.ts b/packages/grpc-js-xds/src/generated/validate/FloatRules.ts index 35aafa809..8d5244c2b 100644 --- a/packages/grpc-js-xds/src/generated/validate/FloatRules.ts +++ b/packages/grpc-js-xds/src/generated/validate/FloatRules.ts @@ -50,37 +50,37 @@ export interface FloatRules__Output { /** * Const specifies that this field must be exactly the specified value */ - 'const': (number | string); + 'const': (number); /** * Lt specifies that this field must be less than the specified value, * exclusive */ - 'lt': (number | string); + 'lt': (number); /** * Lte specifies that this field must be less than or equal to the * specified value, inclusive */ - 'lte': (number | string); + 'lte': (number); /** * Gt specifies that this field 
must be greater than the specified value, * exclusive. If the value of Gt is larger than a specified Lt or Lte, the * range is reversed. */ - 'gt': (number | string); + 'gt': (number); /** * Gte specifies that this field must be greater than or equal to the * specified value, inclusive. If the value of Gte is larger than a * specified Lt or Lte, the range is reversed. */ - 'gte': (number | string); + 'gte': (number); /** * In specifies that this field must be equal to one of the specified * values */ - 'in': (number | string)[]; + 'in': (number)[]; /** * NotIn specifies that this field cannot be equal to one of the specified * values */ - 'not_in': (number | string)[]; + 'not_in': (number)[]; } diff --git a/packages/grpc-js-xds/src/generated/validate/KnownRegex.ts b/packages/grpc-js-xds/src/generated/validate/KnownRegex.ts index 5880b5baf..8f1e20b4c 100644 --- a/packages/grpc-js-xds/src/generated/validate/KnownRegex.ts +++ b/packages/grpc-js-xds/src/generated/validate/KnownRegex.ts @@ -3,14 +3,36 @@ /** * WellKnownRegex contain some well-known patterns. */ -export enum KnownRegex { - UNKNOWN = 0, +export const KnownRegex = { + UNKNOWN: 'UNKNOWN', /** * HTTP header name as defined by RFC 7230. */ - HTTP_HEADER_NAME = 1, + HTTP_HEADER_NAME: 'HTTP_HEADER_NAME', /** * HTTP header value as defined by RFC 7230. */ - HTTP_HEADER_VALUE = 2, -} + HTTP_HEADER_VALUE: 'HTTP_HEADER_VALUE', +} as const; + +/** + * WellKnownRegex contain some well-known patterns. + */ +export type KnownRegex = + | 'UNKNOWN' + | 0 + /** + * HTTP header name as defined by RFC 7230. + */ + | 'HTTP_HEADER_NAME' + | 1 + /** + * HTTP header value as defined by RFC 7230. + */ + | 'HTTP_HEADER_VALUE' + | 2 + +/** + * WellKnownRegex contain some well-known patterns. + */ +export type KnownRegex__Output = typeof KnownRegex[keyof typeof KnownRegex] diff --git a/packages/grpc-js-xds/src/generated/validate/MapRules.ts b/packages/grpc-js-xds/src/generated/validate/MapRules.ts index 0c89bf2b3..7efe5b390 100644 --- a/packages/grpc-js-xds/src/generated/validate/MapRules.ts +++ b/packages/grpc-js-xds/src/generated/validate/MapRules.ts @@ -25,13 +25,13 @@ export interface MapRules { /** * Keys specifies the constraints to be applied to each key in the field. */ - 'keys'?: (_validate_FieldRules); + 'keys'?: (_validate_FieldRules | null); /** * Values specifies the constraints to be applied to the value of each key * in the field. Message values will still have their validations evaluated * unless skip is specified here. */ - 'values'?: (_validate_FieldRules); + 'values'?: (_validate_FieldRules | null); } /** @@ -56,11 +56,11 @@ export interface MapRules__Output { /** * Keys specifies the constraints to be applied to each key in the field. */ - 'keys'?: (_validate_FieldRules__Output); + 'keys': (_validate_FieldRules__Output | null); /** * Values specifies the constraints to be applied to the value of each key * in the field. Message values will still have their validations evaluated * unless skip is specified here. 
*/ - 'values'?: (_validate_FieldRules__Output); + 'values': (_validate_FieldRules__Output | null); } diff --git a/packages/grpc-js-xds/src/generated/validate/RepeatedRules.ts b/packages/grpc-js-xds/src/generated/validate/RepeatedRules.ts index 1f6d4f0ff..d347b6497 100644 --- a/packages/grpc-js-xds/src/generated/validate/RepeatedRules.ts +++ b/packages/grpc-js-xds/src/generated/validate/RepeatedRules.ts @@ -28,7 +28,7 @@ export interface RepeatedRules { * Repeated message fields will still execute validation against each item * unless skip is specified here. */ - 'items'?: (_validate_FieldRules); + 'items'?: (_validate_FieldRules | null); } /** @@ -56,5 +56,5 @@ export interface RepeatedRules__Output { * Repeated message fields will still execute validation against each item * unless skip is specified here. */ - 'items'?: (_validate_FieldRules__Output); + 'items': (_validate_FieldRules__Output | null); } diff --git a/packages/grpc-js-xds/src/generated/validate/StringRules.ts b/packages/grpc-js-xds/src/generated/validate/StringRules.ts index b6bb1e460..8bca6dffa 100644 --- a/packages/grpc-js-xds/src/generated/validate/StringRules.ts +++ b/packages/grpc-js-xds/src/generated/validate/StringRules.ts @@ -1,6 +1,6 @@ // Original file: deps/protoc-gen-validate/validate/validate.proto -import type { KnownRegex as _validate_KnownRegex } from '../validate/KnownRegex'; +import type { KnownRegex as _validate_KnownRegex, KnownRegex__Output as _validate_KnownRegex__Output } from '../validate/KnownRegex'; import type { Long } from '@grpc/proto-loader'; /** @@ -129,7 +129,7 @@ export interface StringRules { /** * WellKnownRegex specifies a common well known pattern defined as a regex. */ - 'well_known_regex'?: (_validate_KnownRegex | keyof typeof _validate_KnownRegex); + 'well_known_regex'?: (_validate_KnownRegex); /** * This applies to regexes HTTP_HEADER_NAME and HTTP_HEADER_VALUE to enable * strict header validation. @@ -271,7 +271,7 @@ export interface StringRules__Output { /** * WellKnownRegex specifies a common well known pattern defined as a regex. */ - 'well_known_regex'?: (keyof typeof _validate_KnownRegex); + 'well_known_regex'?: (_validate_KnownRegex__Output); /** * This applies to regexes HTTP_HEADER_NAME and HTTP_HEADER_VALUE to enable * strict header validation. 
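
Editor's note (illustrative, not part of the patch): the hunks above change two recurring patterns in the regenerated code. Proto enums such as `KnownRegex` and `PackageVersionStatus` are now emitted as a `const` object plus a string/number union type, with the `__Output` variant narrowed to the string names; message-typed fields on `__Output` interfaces become required but nullable (`T | null`) instead of optional; and floating-point `__Output` fields drop the `| string` alternative. The sketch below shows how a consumer might use the regenerated `StringRules`/`KnownRegex` types under these assumptions; the import path is inferred from the directory layout shown in this diff, and `describeWellKnownRegex` is a hypothetical helper, not something added by the patch.

```ts
// Illustrative only -- not part of the patch. Assumes this snippet is compiled
// next to the generated files under src/generated/ as laid out in the diff.
import type { StringRules, StringRules__Output } from './generated/validate/StringRules';

// On the input side, enum-typed fields accept either the string name or the
// numeric value, per the KnownRegex union type in the hunk above.
export const byName: StringRules = { well_known_regex: 'HTTP_HEADER_NAME' };
export const byNumber: StringRules = { well_known_regex: 1 };

// On the output side, the enum is always the string name (KnownRegex__Output),
// so a switch over the literals covers every defined value; the field itself
// remains optional in the __Output interface, which the default branch handles.
export function describeWellKnownRegex(rules: StringRules__Output): string {
  switch (rules.well_known_regex) {
    case 'HTTP_HEADER_NAME':
      return 'HTTP header name (RFC 7230)';
    case 'HTTP_HEADER_VALUE':
      return 'HTTP header value (RFC 7230)';
    case 'UNKNOWN':
    default:
      return 'no well-known regex constraint';
  }
}
```

The same reading applies to the nullable message fields in the hunks above: on deserialized `__Output` messages, fields like `DurationRules__Output.lt` are always present but may be `null`, so consumers check for `null` rather than for a missing property.
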
diff --git a/packages/grpc-js-xds/src/generated/validate/TimestampRules.ts b/packages/grpc-js-xds/src/generated/validate/TimestampRules.ts index 9436cc8ee..69d548126 100644 --- a/packages/grpc-js-xds/src/generated/validate/TimestampRules.ts +++ b/packages/grpc-js-xds/src/generated/validate/TimestampRules.ts @@ -15,27 +15,27 @@ export interface TimestampRules { /** * Const specifies that this field must be exactly the specified value */ - 'const'?: (_google_protobuf_Timestamp); + 'const'?: (_google_protobuf_Timestamp | null); /** * Lt specifies that this field must be less than the specified value, * exclusive */ - 'lt'?: (_google_protobuf_Timestamp); + 'lt'?: (_google_protobuf_Timestamp | null); /** * Lte specifies that this field must be less than the specified value, * inclusive */ - 'lte'?: (_google_protobuf_Timestamp); + 'lte'?: (_google_protobuf_Timestamp | null); /** * Gt specifies that this field must be greater than the specified value, * exclusive */ - 'gt'?: (_google_protobuf_Timestamp); + 'gt'?: (_google_protobuf_Timestamp | null); /** * Gte specifies that this field must be greater than the specified value, * inclusive */ - 'gte'?: (_google_protobuf_Timestamp); + 'gte'?: (_google_protobuf_Timestamp | null); /** * LtNow specifies that this must be less than the current time. LtNow * can only be used with the Within rule. @@ -51,7 +51,7 @@ export interface TimestampRules { * current time. This constraint can be used alone or with the LtNow and * GtNow rules. */ - 'within'?: (_google_protobuf_Duration); + 'within'?: (_google_protobuf_Duration | null); } /** @@ -66,27 +66,27 @@ export interface TimestampRules__Output { /** * Const specifies that this field must be exactly the specified value */ - 'const'?: (_google_protobuf_Timestamp__Output); + 'const': (_google_protobuf_Timestamp__Output | null); /** * Lt specifies that this field must be less than the specified value, * exclusive */ - 'lt'?: (_google_protobuf_Timestamp__Output); + 'lt': (_google_protobuf_Timestamp__Output | null); /** * Lte specifies that this field must be less than the specified value, * inclusive */ - 'lte'?: (_google_protobuf_Timestamp__Output); + 'lte': (_google_protobuf_Timestamp__Output | null); /** * Gt specifies that this field must be greater than the specified value, * exclusive */ - 'gt'?: (_google_protobuf_Timestamp__Output); + 'gt': (_google_protobuf_Timestamp__Output | null); /** * Gte specifies that this field must be greater than the specified value, * inclusive */ - 'gte'?: (_google_protobuf_Timestamp__Output); + 'gte': (_google_protobuf_Timestamp__Output | null); /** * LtNow specifies that this must be less than the current time. LtNow * can only be used with the Within rule. @@ -102,5 +102,5 @@ export interface TimestampRules__Output { * current time. This constraint can be used alone or with the LtNow and * GtNow rules. 
*/ - 'within'?: (_google_protobuf_Duration__Output); + 'within': (_google_protobuf_Duration__Output | null); } diff --git a/packages/grpc-js-xds/src/generated/wrr_locality.ts b/packages/grpc-js-xds/src/generated/wrr_locality.ts new file mode 100644 index 000000000..e0275ef9a --- /dev/null +++ b/packages/grpc-js-xds/src/generated/wrr_locality.ts @@ -0,0 +1,231 @@ +import type * as grpc from '@grpc/grpc-js'; +import type { EnumTypeDefinition, MessageTypeDefinition } from '@grpc/proto-loader'; + + +type SubtypeConstructor any, Subtype> = { + new(...args: ConstructorParameters): Subtype; +}; + +export interface ProtoGrpcType { + envoy: { + annotations: { + } + config: { + cluster: { + v3: { + CircuitBreakers: MessageTypeDefinition + Cluster: MessageTypeDefinition + ClusterCollection: MessageTypeDefinition + Filter: MessageTypeDefinition + LoadBalancingPolicy: MessageTypeDefinition + OutlierDetection: MessageTypeDefinition + TrackClusterStats: MessageTypeDefinition + UpstreamConnectionOptions: MessageTypeDefinition + } + } + core: { + v3: { + Address: MessageTypeDefinition + AggregatedConfigSource: MessageTypeDefinition + AlternateProtocolsCacheOptions: MessageTypeDefinition + ApiConfigSource: MessageTypeDefinition + ApiVersion: EnumTypeDefinition + AsyncDataSource: MessageTypeDefinition + BackoffStrategy: MessageTypeDefinition + BindConfig: MessageTypeDefinition + BuildVersion: MessageTypeDefinition + CidrRange: MessageTypeDefinition + ConfigSource: MessageTypeDefinition + ControlPlane: MessageTypeDefinition + DataSource: MessageTypeDefinition + DnsResolutionConfig: MessageTypeDefinition + DnsResolverOptions: MessageTypeDefinition + EnvoyInternalAddress: MessageTypeDefinition + EventServiceConfig: MessageTypeDefinition + Extension: MessageTypeDefinition + ExtensionConfigSource: MessageTypeDefinition + ExtraSourceAddress: MessageTypeDefinition + GrpcProtocolOptions: MessageTypeDefinition + GrpcService: MessageTypeDefinition + HeaderMap: MessageTypeDefinition + HeaderValue: MessageTypeDefinition + HeaderValueOption: MessageTypeDefinition + HealthCheck: MessageTypeDefinition + HealthStatus: EnumTypeDefinition + HealthStatusSet: MessageTypeDefinition + Http1ProtocolOptions: MessageTypeDefinition + Http2ProtocolOptions: MessageTypeDefinition + Http3ProtocolOptions: MessageTypeDefinition + HttpProtocolOptions: MessageTypeDefinition + HttpUri: MessageTypeDefinition + KeepaliveSettings: MessageTypeDefinition + Locality: MessageTypeDefinition + Metadata: MessageTypeDefinition + Node: MessageTypeDefinition + PathConfigSource: MessageTypeDefinition + Pipe: MessageTypeDefinition + QueryParameter: MessageTypeDefinition + QuicKeepAliveSettings: MessageTypeDefinition + QuicProtocolOptions: MessageTypeDefinition + RateLimitSettings: MessageTypeDefinition + RemoteDataSource: MessageTypeDefinition + RequestMethod: EnumTypeDefinition + RetryPolicy: MessageTypeDefinition + RoutingPriority: EnumTypeDefinition + RuntimeDouble: MessageTypeDefinition + RuntimeFeatureFlag: MessageTypeDefinition + RuntimeFractionalPercent: MessageTypeDefinition + RuntimePercent: MessageTypeDefinition + RuntimeUInt32: MessageTypeDefinition + SchemeHeaderTransformation: MessageTypeDefinition + SelfConfigSource: MessageTypeDefinition + SocketAddress: MessageTypeDefinition + SocketOption: MessageTypeDefinition + SocketOptionsOverride: MessageTypeDefinition + TcpKeepalive: MessageTypeDefinition + TcpProtocolOptions: MessageTypeDefinition + TrafficDirection: EnumTypeDefinition + TransportSocket: MessageTypeDefinition + TypedExtensionConfig: 
MessageTypeDefinition + UpstreamHttpProtocolOptions: MessageTypeDefinition + WatchedDirectory: MessageTypeDefinition + } + } + endpoint: { + v3: { + ClusterLoadAssignment: MessageTypeDefinition + Endpoint: MessageTypeDefinition + LbEndpoint: MessageTypeDefinition + LedsClusterLocalityConfig: MessageTypeDefinition + LocalityLbEndpoints: MessageTypeDefinition + } + } + } + extensions: { + load_balancing_policies: { + wrr_locality: { + v3: { + WrrLocality: MessageTypeDefinition + } + } + } + } + type: { + matcher: { + v3: { + ListStringMatcher: MessageTypeDefinition + RegexMatchAndSubstitute: MessageTypeDefinition + RegexMatcher: MessageTypeDefinition + StringMatcher: MessageTypeDefinition + } + } + v3: { + CodecClientType: EnumTypeDefinition + DoubleRange: MessageTypeDefinition + FractionalPercent: MessageTypeDefinition + Int32Range: MessageTypeDefinition + Int64Range: MessageTypeDefinition + Percent: MessageTypeDefinition + SemanticVersion: MessageTypeDefinition + } + } + } + google: { + protobuf: { + Any: MessageTypeDefinition + BoolValue: MessageTypeDefinition + BytesValue: MessageTypeDefinition + DescriptorProto: MessageTypeDefinition + DoubleValue: MessageTypeDefinition + Duration: MessageTypeDefinition + Empty: MessageTypeDefinition + EnumDescriptorProto: MessageTypeDefinition + EnumOptions: MessageTypeDefinition + EnumValueDescriptorProto: MessageTypeDefinition + EnumValueOptions: MessageTypeDefinition + FieldDescriptorProto: MessageTypeDefinition + FieldOptions: MessageTypeDefinition + FileDescriptorProto: MessageTypeDefinition + FileDescriptorSet: MessageTypeDefinition + FileOptions: MessageTypeDefinition + FloatValue: MessageTypeDefinition + GeneratedCodeInfo: MessageTypeDefinition + Int32Value: MessageTypeDefinition + Int64Value: MessageTypeDefinition + ListValue: MessageTypeDefinition + MessageOptions: MessageTypeDefinition + MethodDescriptorProto: MessageTypeDefinition + MethodOptions: MessageTypeDefinition + NullValue: EnumTypeDefinition + OneofDescriptorProto: MessageTypeDefinition + OneofOptions: MessageTypeDefinition + ServiceDescriptorProto: MessageTypeDefinition + ServiceOptions: MessageTypeDefinition + SourceCodeInfo: MessageTypeDefinition + StringValue: MessageTypeDefinition + Struct: MessageTypeDefinition + Timestamp: MessageTypeDefinition + UInt32Value: MessageTypeDefinition + UInt64Value: MessageTypeDefinition + UninterpretedOption: MessageTypeDefinition + Value: MessageTypeDefinition + } + } + udpa: { + annotations: { + FieldMigrateAnnotation: MessageTypeDefinition + FieldSecurityAnnotation: MessageTypeDefinition + FileMigrateAnnotation: MessageTypeDefinition + MigrateAnnotation: MessageTypeDefinition + PackageVersionStatus: EnumTypeDefinition + StatusAnnotation: MessageTypeDefinition + VersioningAnnotation: MessageTypeDefinition + } + } + validate: { + AnyRules: MessageTypeDefinition + BoolRules: MessageTypeDefinition + BytesRules: MessageTypeDefinition + DoubleRules: MessageTypeDefinition + DurationRules: MessageTypeDefinition + EnumRules: MessageTypeDefinition + FieldRules: MessageTypeDefinition + Fixed32Rules: MessageTypeDefinition + Fixed64Rules: MessageTypeDefinition + FloatRules: MessageTypeDefinition + Int32Rules: MessageTypeDefinition + Int64Rules: MessageTypeDefinition + KnownRegex: EnumTypeDefinition + MapRules: MessageTypeDefinition + MessageRules: MessageTypeDefinition + RepeatedRules: MessageTypeDefinition + SFixed32Rules: MessageTypeDefinition + SFixed64Rules: MessageTypeDefinition + SInt32Rules: MessageTypeDefinition + SInt64Rules: 
MessageTypeDefinition + StringRules: MessageTypeDefinition + TimestampRules: MessageTypeDefinition + UInt32Rules: MessageTypeDefinition + UInt64Rules: MessageTypeDefinition + } + xds: { + annotations: { + v3: { + FieldStatusAnnotation: MessageTypeDefinition + FileStatusAnnotation: MessageTypeDefinition + MessageStatusAnnotation: MessageTypeDefinition + PackageVersionStatus: EnumTypeDefinition + StatusAnnotation: MessageTypeDefinition + } + } + core: { + v3: { + Authority: MessageTypeDefinition + CollectionEntry: MessageTypeDefinition + ContextParams: MessageTypeDefinition + ResourceLocator: MessageTypeDefinition + } + } + } +} + diff --git a/packages/grpc-js-xds/src/generated/xds/annotations/v3/FieldStatusAnnotation.ts b/packages/grpc-js-xds/src/generated/xds/annotations/v3/FieldStatusAnnotation.ts new file mode 100644 index 000000000..744e13837 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/xds/annotations/v3/FieldStatusAnnotation.ts @@ -0,0 +1,16 @@ +// Original file: deps/xds/xds/annotations/v3/status.proto + + +export interface FieldStatusAnnotation { + /** + * The entity is work-in-progress and subject to breaking changes. + */ + 'work_in_progress'?: (boolean); +} + +export interface FieldStatusAnnotation__Output { + /** + * The entity is work-in-progress and subject to breaking changes. + */ + 'work_in_progress': (boolean); +} diff --git a/packages/grpc-js-xds/src/generated/xds/annotations/v3/FileStatusAnnotation.ts b/packages/grpc-js-xds/src/generated/xds/annotations/v3/FileStatusAnnotation.ts new file mode 100644 index 000000000..cbc3ab3f9 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/xds/annotations/v3/FileStatusAnnotation.ts @@ -0,0 +1,16 @@ +// Original file: deps/xds/xds/annotations/v3/status.proto + + +export interface FileStatusAnnotation { + /** + * The entity is work-in-progress and subject to breaking changes. + */ + 'work_in_progress'?: (boolean); +} + +export interface FileStatusAnnotation__Output { + /** + * The entity is work-in-progress and subject to breaking changes. + */ + 'work_in_progress': (boolean); +} diff --git a/packages/grpc-js-xds/src/generated/xds/annotations/v3/MessageStatusAnnotation.ts b/packages/grpc-js-xds/src/generated/xds/annotations/v3/MessageStatusAnnotation.ts new file mode 100644 index 000000000..f403f6506 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/xds/annotations/v3/MessageStatusAnnotation.ts @@ -0,0 +1,16 @@ +// Original file: deps/xds/xds/annotations/v3/status.proto + + +export interface MessageStatusAnnotation { + /** + * The entity is work-in-progress and subject to breaking changes. + */ + 'work_in_progress'?: (boolean); +} + +export interface MessageStatusAnnotation__Output { + /** + * The entity is work-in-progress and subject to breaking changes. + */ + 'work_in_progress': (boolean); +} diff --git a/packages/grpc-js-xds/src/generated/xds/annotations/v3/PackageVersionStatus.ts b/packages/grpc-js-xds/src/generated/xds/annotations/v3/PackageVersionStatus.ts new file mode 100644 index 000000000..e85074eae --- /dev/null +++ b/packages/grpc-js-xds/src/generated/xds/annotations/v3/PackageVersionStatus.ts @@ -0,0 +1,46 @@ +// Original file: deps/xds/xds/annotations/v3/status.proto + +export const PackageVersionStatus = { + /** + * Unknown package version status. + */ + UNKNOWN: 'UNKNOWN', + /** + * This version of the package is frozen. + */ + FROZEN: 'FROZEN', + /** + * This version of the package is the active development version. 
+ */ + ACTIVE: 'ACTIVE', + /** + * This version of the package is the candidate for the next major version. It + * is typically machine generated from the active development version. + */ + NEXT_MAJOR_VERSION_CANDIDATE: 'NEXT_MAJOR_VERSION_CANDIDATE', +} as const; + +export type PackageVersionStatus = + /** + * Unknown package version status. + */ + | 'UNKNOWN' + | 0 + /** + * This version of the package is frozen. + */ + | 'FROZEN' + | 1 + /** + * This version of the package is the active development version. + */ + | 'ACTIVE' + | 2 + /** + * This version of the package is the candidate for the next major version. It + * is typically machine generated from the active development version. + */ + | 'NEXT_MAJOR_VERSION_CANDIDATE' + | 3 + +export type PackageVersionStatus__Output = typeof PackageVersionStatus[keyof typeof PackageVersionStatus] diff --git a/packages/grpc-js-xds/src/generated/xds/annotations/v3/StatusAnnotation.ts b/packages/grpc-js-xds/src/generated/xds/annotations/v3/StatusAnnotation.ts new file mode 100644 index 000000000..678d6a6bf --- /dev/null +++ b/packages/grpc-js-xds/src/generated/xds/annotations/v3/StatusAnnotation.ts @@ -0,0 +1,25 @@ +// Original file: deps/xds/xds/annotations/v3/status.proto + +import type { PackageVersionStatus as _xds_annotations_v3_PackageVersionStatus, PackageVersionStatus__Output as _xds_annotations_v3_PackageVersionStatus__Output } from '../../../xds/annotations/v3/PackageVersionStatus'; + +export interface StatusAnnotation { + /** + * The entity is work-in-progress and subject to breaking changes. + */ + 'work_in_progress'?: (boolean); + /** + * The entity belongs to a package with the given version status. + */ + 'package_version_status'?: (_xds_annotations_v3_PackageVersionStatus); +} + +export interface StatusAnnotation__Output { + /** + * The entity is work-in-progress and subject to breaking changes. + */ + 'work_in_progress': (boolean); + /** + * The entity belongs to a package with the given version status. + */ + 'package_version_status': (_xds_annotations_v3_PackageVersionStatus__Output); +} diff --git a/packages/grpc-js-xds/src/generated/xds/core/v3/Authority.ts b/packages/grpc-js-xds/src/generated/xds/core/v3/Authority.ts new file mode 100644 index 000000000..9505731d7 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/xds/core/v3/Authority.ts @@ -0,0 +1,16 @@ +// Original file: deps/xds/xds/core/v3/authority.proto + + +/** + * xDS authority information. + */ +export interface Authority { + 'name'?: (string); +} + +/** + * xDS authority information. + */ +export interface Authority__Output { + 'name': (string); +} diff --git a/packages/grpc-js-xds/src/generated/xds/core/v3/CollectionEntry.ts b/packages/grpc-js-xds/src/generated/xds/core/v3/CollectionEntry.ts new file mode 100644 index 000000000..5d2ce9721 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/xds/core/v3/CollectionEntry.ts @@ -0,0 +1,90 @@ +// Original file: deps/xds/xds/core/v3/collection_entry.proto + +import type { ResourceLocator as _xds_core_v3_ResourceLocator, ResourceLocator__Output as _xds_core_v3_ResourceLocator__Output } from '../../../xds/core/v3/ResourceLocator'; +import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../google/protobuf/Any'; + +/** + * Inlined resource entry. + */ +export interface _xds_core_v3_CollectionEntry_InlineEntry { + /** + * Optional name to describe the inlined resource. 
Resource names must + * [a-zA-Z0-9_-\./]+ (TODO(htuch): turn this into a PGV constraint once + * finalized, probably should be a RFC3986 pchar). This name allows + * reference via the #entry directive in ResourceLocator. + */ + 'name'?: (string); + /** + * The resource's logical version. It is illegal to have the same named xDS + * resource name at a given version with different resource payloads. + */ + 'version'?: (string); + /** + * The resource payload, including type URL. + */ + 'resource'?: (_google_protobuf_Any | null); +} + +/** + * Inlined resource entry. + */ +export interface _xds_core_v3_CollectionEntry_InlineEntry__Output { + /** + * Optional name to describe the inlined resource. Resource names must + * [a-zA-Z0-9_-\./]+ (TODO(htuch): turn this into a PGV constraint once + * finalized, probably should be a RFC3986 pchar). This name allows + * reference via the #entry directive in ResourceLocator. + */ + 'name': (string); + /** + * The resource's logical version. It is illegal to have the same named xDS + * resource name at a given version with different resource payloads. + */ + 'version': (string); + /** + * The resource payload, including type URL. + */ + 'resource': (_google_protobuf_Any__Output | null); +} + +/** + * xDS collection resource wrapper. This encapsulates a xDS resource when + * appearing inside a list collection resource. List collection resources are + * regular Resource messages of type: + * + * message Collection { + * repeated CollectionEntry resources = 1; + * } + */ +export interface CollectionEntry { + /** + * A resource locator describing how the member resource is to be located. + */ + 'locator'?: (_xds_core_v3_ResourceLocator | null); + /** + * The resource is inlined in the list collection. + */ + 'inline_entry'?: (_xds_core_v3_CollectionEntry_InlineEntry | null); + 'resource_specifier'?: "locator"|"inline_entry"; +} + +/** + * xDS collection resource wrapper. This encapsulates a xDS resource when + * appearing inside a list collection resource. List collection resources are + * regular Resource messages of type: + * + * message Collection { + * repeated CollectionEntry resources = 1; + * } + */ +export interface CollectionEntry__Output { + /** + * A resource locator describing how the member resource is to be located. + */ + 'locator'?: (_xds_core_v3_ResourceLocator__Output | null); + /** + * The resource is inlined in the list collection. + */ + 'inline_entry'?: (_xds_core_v3_CollectionEntry_InlineEntry__Output | null); + 'resource_specifier': "locator"|"inline_entry"; +} diff --git a/packages/grpc-js-xds/src/generated/xds/core/v3/ContextParams.ts b/packages/grpc-js-xds/src/generated/xds/core/v3/ContextParams.ts new file mode 100644 index 000000000..19a8a99bb --- /dev/null +++ b/packages/grpc-js-xds/src/generated/xds/core/v3/ContextParams.ts @@ -0,0 +1,26 @@ +// Original file: deps/xds/xds/core/v3/context_params.proto + + +/** + * Additional parameters that can be used to select resource variants. These include any + * global context parameters, per-resource type client feature capabilities and per-resource + * type functional attributes. All per-resource type attributes will be `xds.resource.` + * prefixed and some of these are documented below: + * `xds.resource.listening_address`: The value is "IP:port" (e.g. "10.1.1.3:8080") which is + * the listening address of a Listener. Used in a Listener resource query. 
+ */ +export interface ContextParams { + 'params'?: ({[key: string]: string}); +} + +/** + * Additional parameters that can be used to select resource variants. These include any + * global context parameters, per-resource type client feature capabilities and per-resource + * type functional attributes. All per-resource type attributes will be `xds.resource.` + * prefixed and some of these are documented below: + * `xds.resource.listening_address`: The value is "IP:port" (e.g. "10.1.1.3:8080") which is + * the listening address of a Listener. Used in a Listener resource query. + */ +export interface ContextParams__Output { + 'params': ({[key: string]: string}); +} diff --git a/packages/grpc-js-xds/src/generated/xds/core/v3/ResourceLocator.ts b/packages/grpc-js-xds/src/generated/xds/core/v3/ResourceLocator.ts new file mode 100644 index 000000000..28f981dd5 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/xds/core/v3/ResourceLocator.ts @@ -0,0 +1,230 @@ +// Original file: deps/xds/xds/core/v3/resource_locator.proto + +import type { ContextParams as _xds_core_v3_ContextParams, ContextParams__Output as _xds_core_v3_ContextParams__Output } from '../../../xds/core/v3/ContextParams'; +import type { ResourceLocator as _xds_core_v3_ResourceLocator, ResourceLocator__Output as _xds_core_v3_ResourceLocator__Output } from '../../../xds/core/v3/ResourceLocator'; + +/** + * Directives provide information to data-plane load balancers on how xDS + * resource names are to be interpreted and potentially further resolved. For + * example, they may provide alternative resource locators for when primary + * resolution fails. Directives are not part of resource names and do not + * appear in a xDS transport discovery request. + * + * When encoding to URIs, directives take the form: + * + * = + * + * For example, we can have alt=xdstp://foo/bar or entry=some%20thing. Each + * directive value type may have its own string encoding, in the case of + * ResourceLocator there is a recursive URI encoding. + * + * Percent encoding applies to the URI encoding of the directive value. + * Multiple directives are comma-separated, so the reserved characters that + * require percent encoding in a directive value are [',', '#', '[', ']', + * '%']. These are the RFC3986 fragment reserved characters with the addition + * of the xDS scheme specific ','. See + * https://tools.ietf.org/html/rfc3986#page-49 for further details on URI ABNF + * and reserved characters. + */ +export interface _xds_core_v3_ResourceLocator_Directive { + /** + * An alternative resource locator for fallback if the resource is + * unavailable. For example, take the resource locator: + * + * xdstp://foo/some-type/some-route-table#alt=xdstp://bar/some-type/another-route-table + * + * If the data-plane load balancer is unable to reach `foo` to fetch the + * resource, it will fallback to `bar`. Alternative resources do not need + * to have equivalent content, but they should be functional substitutes. + */ + 'alt'?: (_xds_core_v3_ResourceLocator | null); + /** + * List collections support inlining of resources via the entry field in + * Resource. These inlined Resource objects may have an optional name + * field specified. When specified, the entry directive allows + * ResourceLocator to directly reference these inlined resources, e.g. + * xdstp://.../foo#entry=bar. 
+ */ + 'entry'?: (string); + 'directive'?: "alt"|"entry"; +} + +/** + * Directives provide information to data-plane load balancers on how xDS + * resource names are to be interpreted and potentially further resolved. For + * example, they may provide alternative resource locators for when primary + * resolution fails. Directives are not part of resource names and do not + * appear in a xDS transport discovery request. + * + * When encoding to URIs, directives take the form: + * + * = + * + * For example, we can have alt=xdstp://foo/bar or entry=some%20thing. Each + * directive value type may have its own string encoding, in the case of + * ResourceLocator there is a recursive URI encoding. + * + * Percent encoding applies to the URI encoding of the directive value. + * Multiple directives are comma-separated, so the reserved characters that + * require percent encoding in a directive value are [',', '#', '[', ']', + * '%']. These are the RFC3986 fragment reserved characters with the addition + * of the xDS scheme specific ','. See + * https://tools.ietf.org/html/rfc3986#page-49 for further details on URI ABNF + * and reserved characters. + */ +export interface _xds_core_v3_ResourceLocator_Directive__Output { + /** + * An alternative resource locator for fallback if the resource is + * unavailable. For example, take the resource locator: + * + * xdstp://foo/some-type/some-route-table#alt=xdstp://bar/some-type/another-route-table + * + * If the data-plane load balancer is unable to reach `foo` to fetch the + * resource, it will fallback to `bar`. Alternative resources do not need + * to have equivalent content, but they should be functional substitutes. + */ + 'alt'?: (_xds_core_v3_ResourceLocator__Output | null); + /** + * List collections support inlining of resources via the entry field in + * Resource. These inlined Resource objects may have an optional name + * field specified. When specified, the entry directive allows + * ResourceLocator to directly reference these inlined resources, e.g. + * xdstp://.../foo#entry=bar. + */ + 'entry'?: (string); + 'directive': "alt"|"entry"; +} + +// Original file: deps/xds/xds/core/v3/resource_locator.proto + +export const _xds_core_v3_ResourceLocator_Scheme = { + XDSTP: 'XDSTP', + HTTP: 'HTTP', + FILE: 'FILE', +} as const; + +export type _xds_core_v3_ResourceLocator_Scheme = + | 'XDSTP' + | 0 + | 'HTTP' + | 1 + | 'FILE' + | 2 + +export type _xds_core_v3_ResourceLocator_Scheme__Output = typeof _xds_core_v3_ResourceLocator_Scheme[keyof typeof _xds_core_v3_ResourceLocator_Scheme] + +/** + * xDS resource locators identify a xDS resource name and instruct the + * data-plane load balancer on how the resource may be located. + * + * Resource locators have a canonical xdstp:// URI representation: + * + * xdstp://{authority}/{type_url}/{id}?{context_params}{#directive,*} + * + * where context_params take the form of URI query parameters. + * + * Resource locators have a similar canonical http:// URI representation: + * + * http://{authority}/{type_url}/{id}?{context_params}{#directive,*} + * + * Resource locators also have a simplified file:// URI representation: + * + * file:///{id}{#directive,*} + */ +export interface ResourceLocator { + /** + * URI scheme. + */ + 'scheme'?: (_xds_core_v3_ResourceLocator_Scheme); + /** + * Opaque identifier for the resource. Any '/' will not be escaped during URI + * encoding and will form part of the URI path. This may end + * with ‘*’ for glob collection references. 
+ */ + 'id'?: (string); + /** + * Logical authority for resource (not necessarily transport network address). + * Authorities are opaque in the xDS API, data-plane load balancers will map + * them to concrete network transports such as an xDS management server, e.g. + * via envoy.config.core.v3.ConfigSource. + */ + 'authority'?: (string); + /** + * Fully qualified resource type (as in type URL without types.googleapis.com/ + * prefix). + */ + 'resource_type'?: (string); + /** + * Additional parameters that can be used to select resource variants. + * Matches must be exact, i.e. all context parameters must match exactly and + * there must be no additional context parameters set on the matched + * resource. + */ + 'exact_context'?: (_xds_core_v3_ContextParams | null); + /** + * A list of directives that appear in the xDS resource locator #fragment. + * + * When encoding to URI form, directives are percent encoded with comma + * separation. + */ + 'directives'?: (_xds_core_v3_ResourceLocator_Directive)[]; + 'context_param_specifier'?: "exact_context"; +} + +/** + * xDS resource locators identify a xDS resource name and instruct the + * data-plane load balancer on how the resource may be located. + * + * Resource locators have a canonical xdstp:// URI representation: + * + * xdstp://{authority}/{type_url}/{id}?{context_params}{#directive,*} + * + * where context_params take the form of URI query parameters. + * + * Resource locators have a similar canonical http:// URI representation: + * + * http://{authority}/{type_url}/{id}?{context_params}{#directive,*} + * + * Resource locators also have a simplified file:// URI representation: + * + * file:///{id}{#directive,*} + */ +export interface ResourceLocator__Output { + /** + * URI scheme. + */ + 'scheme': (_xds_core_v3_ResourceLocator_Scheme__Output); + /** + * Opaque identifier for the resource. Any '/' will not be escaped during URI + * encoding and will form part of the URI path. This may end + * with ‘*’ for glob collection references. + */ + 'id': (string); + /** + * Logical authority for resource (not necessarily transport network address). + * Authorities are opaque in the xDS API, data-plane load balancers will map + * them to concrete network transports such as an xDS management server, e.g. + * via envoy.config.core.v3.ConfigSource. + */ + 'authority': (string); + /** + * Fully qualified resource type (as in type URL without types.googleapis.com/ + * prefix). + */ + 'resource_type': (string); + /** + * Additional parameters that can be used to select resource variants. + * Matches must be exact, i.e. all context parameters must match exactly and + * there must be no additional context parameters set on the matched + * resource. + */ + 'exact_context'?: (_xds_core_v3_ContextParams__Output | null); + /** + * A list of directives that appear in the xDS resource locator #fragment. + * + * When encoding to URI form, directives are percent encoded with comma + * separation. 
+ */ + 'directives': (_xds_core_v3_ResourceLocator_Directive__Output)[]; + 'context_param_specifier': "exact_context"; +} diff --git a/packages/grpc-js-xds/src/generated/xds/core/v3/TypedExtensionConfig.ts b/packages/grpc-js-xds/src/generated/xds/core/v3/TypedExtensionConfig.ts new file mode 100644 index 000000000..b6ce18c0a --- /dev/null +++ b/packages/grpc-js-xds/src/generated/xds/core/v3/TypedExtensionConfig.ts @@ -0,0 +1,43 @@ +// Original file: deps/xds/xds/core/v3/extension.proto + +import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../google/protobuf/Any'; + +/** + * Message type for extension configuration. + */ +export interface TypedExtensionConfig { + /** + * The name of an extension. This is not used to select the extension, instead + * it serves the role of an opaque identifier. + */ + 'name'?: (string); + /** + * The typed config for the extension. The type URL will be used to identify + * the extension. In the case that the type URL is *xds.type.v3.TypedStruct* + * (or, for historical reasons, *udpa.type.v1.TypedStruct*), the inner type + * URL of *TypedStruct* will be utilized. See the + * :ref:`extension configuration overview + * ` for further details. + */ + 'typed_config'?: (_google_protobuf_Any | null); +} + +/** + * Message type for extension configuration. + */ +export interface TypedExtensionConfig__Output { + /** + * The name of an extension. This is not used to select the extension, instead + * it serves the role of an opaque identifier. + */ + 'name': (string); + /** + * The typed config for the extension. The type URL will be used to identify + * the extension. In the case that the type URL is *xds.type.v3.TypedStruct* + * (or, for historical reasons, *udpa.type.v1.TypedStruct*), the inner type + * URL of *TypedStruct* will be utilized. See the + * :ref:`extension configuration overview + * ` for further details. + */ + 'typed_config': (_google_protobuf_Any__Output | null); +} diff --git a/packages/grpc-js-xds/src/generated/xds/type/matcher/v3/ListStringMatcher.ts b/packages/grpc-js-xds/src/generated/xds/type/matcher/v3/ListStringMatcher.ts new file mode 100644 index 000000000..e839f292b --- /dev/null +++ b/packages/grpc-js-xds/src/generated/xds/type/matcher/v3/ListStringMatcher.ts @@ -0,0 +1,17 @@ +// Original file: deps/xds/xds/type/matcher/v3/string.proto + +import type { StringMatcher as _xds_type_matcher_v3_StringMatcher, StringMatcher__Output as _xds_type_matcher_v3_StringMatcher__Output } from '../../../../xds/type/matcher/v3/StringMatcher'; + +/** + * Specifies a list of ways to match a string. + */ +export interface ListStringMatcher { + 'patterns'?: (_xds_type_matcher_v3_StringMatcher)[]; +} + +/** + * Specifies a list of ways to match a string. 
+ */ +export interface ListStringMatcher__Output { + 'patterns': (_xds_type_matcher_v3_StringMatcher__Output)[]; +} diff --git a/packages/grpc-js-xds/src/generated/xds/type/matcher/v3/Matcher.ts b/packages/grpc-js-xds/src/generated/xds/type/matcher/v3/Matcher.ts new file mode 100644 index 000000000..be93c0f16 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/xds/type/matcher/v3/Matcher.ts @@ -0,0 +1,307 @@ +// Original file: deps/xds/xds/type/matcher/v3/matcher.proto + +import type { Matcher as _xds_type_matcher_v3_Matcher, Matcher__Output as _xds_type_matcher_v3_Matcher__Output } from '../../../../xds/type/matcher/v3/Matcher'; +import type { TypedExtensionConfig as _xds_core_v3_TypedExtensionConfig, TypedExtensionConfig__Output as _xds_core_v3_TypedExtensionConfig__Output } from '../../../../xds/core/v3/TypedExtensionConfig'; +import type { StringMatcher as _xds_type_matcher_v3_StringMatcher, StringMatcher__Output as _xds_type_matcher_v3_StringMatcher__Output } from '../../../../xds/type/matcher/v3/StringMatcher'; + +/** + * An individual matcher. + */ +export interface _xds_type_matcher_v3_Matcher_MatcherList_FieldMatcher { + /** + * Determines if the match succeeds. + */ + 'predicate'?: (_xds_type_matcher_v3_Matcher_MatcherList_Predicate | null); + /** + * What to do if the match succeeds. + */ + 'on_match'?: (_xds_type_matcher_v3_Matcher_OnMatch | null); +} + +/** + * An individual matcher. + */ +export interface _xds_type_matcher_v3_Matcher_MatcherList_FieldMatcher__Output { + /** + * Determines if the match succeeds. + */ + 'predicate': (_xds_type_matcher_v3_Matcher_MatcherList_Predicate__Output | null); + /** + * What to do if the match succeeds. + */ + 'on_match': (_xds_type_matcher_v3_Matcher_OnMatch__Output | null); +} + +/** + * A map of configured matchers. Used to allow using a map within a oneof. + */ +export interface _xds_type_matcher_v3_Matcher_MatcherTree_MatchMap { + 'map'?: ({[key: string]: _xds_type_matcher_v3_Matcher_OnMatch}); +} + +/** + * A map of configured matchers. Used to allow using a map within a oneof. + */ +export interface _xds_type_matcher_v3_Matcher_MatcherTree_MatchMap__Output { + 'map': ({[key: string]: _xds_type_matcher_v3_Matcher_OnMatch__Output}); +} + +/** + * A linear list of field matchers. + * The field matchers are evaluated in order, and the first match + * wins. + */ +export interface _xds_type_matcher_v3_Matcher_MatcherList { + /** + * A list of matchers. First match wins. + */ + 'matchers'?: (_xds_type_matcher_v3_Matcher_MatcherList_FieldMatcher)[]; +} + +/** + * A linear list of field matchers. + * The field matchers are evaluated in order, and the first match + * wins. + */ +export interface _xds_type_matcher_v3_Matcher_MatcherList__Output { + /** + * A list of matchers. First match wins. + */ + 'matchers': (_xds_type_matcher_v3_Matcher_MatcherList_FieldMatcher__Output)[]; +} + +export interface _xds_type_matcher_v3_Matcher_MatcherTree { + /** + * Protocol-specific specification of input field to match on. + */ + 'input'?: (_xds_core_v3_TypedExtensionConfig | null); + 'exact_match_map'?: (_xds_type_matcher_v3_Matcher_MatcherTree_MatchMap | null); + /** + * Longest matching prefix wins. + */ + 'prefix_match_map'?: (_xds_type_matcher_v3_Matcher_MatcherTree_MatchMap | null); + /** + * Extension for custom matching logic. + */ + 'custom_match'?: (_xds_core_v3_TypedExtensionConfig | null); + /** + * Exact or prefix match maps in which to look up the input value. 
+ * If the lookup succeeds, the match is considered successful, and + * the corresponding OnMatch is used. + */ + 'tree_type'?: "exact_match_map"|"prefix_match_map"|"custom_match"; +} + +export interface _xds_type_matcher_v3_Matcher_MatcherTree__Output { + /** + * Protocol-specific specification of input field to match on. + */ + 'input': (_xds_core_v3_TypedExtensionConfig__Output | null); + 'exact_match_map'?: (_xds_type_matcher_v3_Matcher_MatcherTree_MatchMap__Output | null); + /** + * Longest matching prefix wins. + */ + 'prefix_match_map'?: (_xds_type_matcher_v3_Matcher_MatcherTree_MatchMap__Output | null); + /** + * Extension for custom matching logic. + */ + 'custom_match'?: (_xds_core_v3_TypedExtensionConfig__Output | null); + /** + * Exact or prefix match maps in which to look up the input value. + * If the lookup succeeds, the match is considered successful, and + * the corresponding OnMatch is used. + */ + 'tree_type': "exact_match_map"|"prefix_match_map"|"custom_match"; +} + +/** + * What to do if a match is successful. + */ +export interface _xds_type_matcher_v3_Matcher_OnMatch { + /** + * Nested matcher to evaluate. + * If the nested matcher does not match and does not specify + * on_no_match, then this matcher is considered not to have + * matched, even if a predicate at this level or above returned + * true. + */ + 'matcher'?: (_xds_type_matcher_v3_Matcher | null); + /** + * Protocol-specific action to take. + */ + 'action'?: (_xds_core_v3_TypedExtensionConfig | null); + 'on_match'?: "matcher"|"action"; +} + +/** + * What to do if a match is successful. + */ +export interface _xds_type_matcher_v3_Matcher_OnMatch__Output { + /** + * Nested matcher to evaluate. + * If the nested matcher does not match and does not specify + * on_no_match, then this matcher is considered not to have + * matched, even if a predicate at this level or above returned + * true. + */ + 'matcher'?: (_xds_type_matcher_v3_Matcher__Output | null); + /** + * Protocol-specific action to take. + */ + 'action'?: (_xds_core_v3_TypedExtensionConfig__Output | null); + 'on_match': "matcher"|"action"; +} + +/** + * Predicate to determine if a match is successful. + */ +export interface _xds_type_matcher_v3_Matcher_MatcherList_Predicate { + /** + * A single predicate to evaluate. + */ + 'single_predicate'?: (_xds_type_matcher_v3_Matcher_MatcherList_Predicate_SinglePredicate | null); + /** + * A list of predicates to be OR-ed together. + */ + 'or_matcher'?: (_xds_type_matcher_v3_Matcher_MatcherList_Predicate_PredicateList | null); + /** + * A list of predicates to be AND-ed together. + */ + 'and_matcher'?: (_xds_type_matcher_v3_Matcher_MatcherList_Predicate_PredicateList | null); + /** + * The invert of a predicate + */ + 'not_matcher'?: (_xds_type_matcher_v3_Matcher_MatcherList_Predicate | null); + 'match_type'?: "single_predicate"|"or_matcher"|"and_matcher"|"not_matcher"; +} + +/** + * Predicate to determine if a match is successful. + */ +export interface _xds_type_matcher_v3_Matcher_MatcherList_Predicate__Output { + /** + * A single predicate to evaluate. + */ + 'single_predicate'?: (_xds_type_matcher_v3_Matcher_MatcherList_Predicate_SinglePredicate__Output | null); + /** + * A list of predicates to be OR-ed together. + */ + 'or_matcher'?: (_xds_type_matcher_v3_Matcher_MatcherList_Predicate_PredicateList__Output | null); + /** + * A list of predicates to be AND-ed together. 
+ */ + 'and_matcher'?: (_xds_type_matcher_v3_Matcher_MatcherList_Predicate_PredicateList__Output | null); + /** + * The invert of a predicate + */ + 'not_matcher'?: (_xds_type_matcher_v3_Matcher_MatcherList_Predicate__Output | null); + 'match_type': "single_predicate"|"or_matcher"|"and_matcher"|"not_matcher"; +} + +/** + * A list of two or more matchers. Used to allow using a list within a oneof. + */ +export interface _xds_type_matcher_v3_Matcher_MatcherList_Predicate_PredicateList { + 'predicate'?: (_xds_type_matcher_v3_Matcher_MatcherList_Predicate)[]; +} + +/** + * A list of two or more matchers. Used to allow using a list within a oneof. + */ +export interface _xds_type_matcher_v3_Matcher_MatcherList_Predicate_PredicateList__Output { + 'predicate': (_xds_type_matcher_v3_Matcher_MatcherList_Predicate__Output)[]; +} + +/** + * Predicate for a single input field. + */ +export interface _xds_type_matcher_v3_Matcher_MatcherList_Predicate_SinglePredicate { + /** + * Protocol-specific specification of input field to match on. + * [#extension-category: envoy.matching.common_inputs] + */ + 'input'?: (_xds_core_v3_TypedExtensionConfig | null); + /** + * Built-in string matcher. + */ + 'value_match'?: (_xds_type_matcher_v3_StringMatcher | null); + /** + * Extension for custom matching logic. + * [#extension-category: envoy.matching.input_matchers] + */ + 'custom_match'?: (_xds_core_v3_TypedExtensionConfig | null); + 'matcher'?: "value_match"|"custom_match"; +} + +/** + * Predicate for a single input field. + */ +export interface _xds_type_matcher_v3_Matcher_MatcherList_Predicate_SinglePredicate__Output { + /** + * Protocol-specific specification of input field to match on. + * [#extension-category: envoy.matching.common_inputs] + */ + 'input': (_xds_core_v3_TypedExtensionConfig__Output | null); + /** + * Built-in string matcher. + */ + 'value_match'?: (_xds_type_matcher_v3_StringMatcher__Output | null); + /** + * Extension for custom matching logic. + * [#extension-category: envoy.matching.input_matchers] + */ + 'custom_match'?: (_xds_core_v3_TypedExtensionConfig__Output | null); + 'matcher': "value_match"|"custom_match"; +} + +/** + * A matcher, which may traverse a matching tree in order to result in a match action. + * During matching, the tree will be traversed until a match is found, or if no match + * is found the action specified by the most specific on_no_match will be evaluated. + * As an on_no_match might result in another matching tree being evaluated, this process + * might repeat several times until the final OnMatch (or no match) is decided. + */ +export interface Matcher { + /** + * A linear list of matchers to evaluate. + */ + 'matcher_list'?: (_xds_type_matcher_v3_Matcher_MatcherList | null); + /** + * A match tree to evaluate. + */ + 'matcher_tree'?: (_xds_type_matcher_v3_Matcher_MatcherTree | null); + /** + * Optional OnMatch to use if the matcher failed. + * If specified, the OnMatch is used, and the matcher is considered + * to have matched. + * If not specified, the matcher is considered not to have matched. + */ + 'on_no_match'?: (_xds_type_matcher_v3_Matcher_OnMatch | null); + 'matcher_type'?: "matcher_list"|"matcher_tree"; +} + +/** + * A matcher, which may traverse a matching tree in order to result in a match action. + * During matching, the tree will be traversed until a match is found, or if no match + * is found the action specified by the most specific on_no_match will be evaluated. 
+ * As an on_no_match might result in another matching tree being evaluated, this process + * might repeat several times until the final OnMatch (or no match) is decided. + */ +export interface Matcher__Output { + /** + * A linear list of matchers to evaluate. + */ + 'matcher_list'?: (_xds_type_matcher_v3_Matcher_MatcherList__Output | null); + /** + * A match tree to evaluate. + */ + 'matcher_tree'?: (_xds_type_matcher_v3_Matcher_MatcherTree__Output | null); + /** + * Optional OnMatch to use if the matcher failed. + * If specified, the OnMatch is used, and the matcher is considered + * to have matched. + * If not specified, the matcher is considered not to have matched. + */ + 'on_no_match': (_xds_type_matcher_v3_Matcher_OnMatch__Output | null); + 'matcher_type': "matcher_list"|"matcher_tree"; +} diff --git a/packages/grpc-js-xds/src/generated/xds/type/matcher/v3/RegexMatcher.ts b/packages/grpc-js-xds/src/generated/xds/type/matcher/v3/RegexMatcher.ts new file mode 100644 index 000000000..575051041 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/xds/type/matcher/v3/RegexMatcher.ts @@ -0,0 +1,80 @@ +// Original file: deps/xds/xds/type/matcher/v3/regex.proto + + +/** + * Google's `RE2 `_ regex engine. The regex + * string must adhere to the documented `syntax + * `_. The engine is designed to + * complete execution in linear time as well as limit the amount of memory + * used. + * + * Envoy supports program size checking via runtime. The runtime keys + * `re2.max_program_size.error_level` and `re2.max_program_size.warn_level` + * can be set to integers as the maximum program size or complexity that a + * compiled regex can have before an exception is thrown or a warning is + * logged, respectively. `re2.max_program_size.error_level` defaults to 100, + * and `re2.max_program_size.warn_level` has no default if unset (will not + * check/log a warning). + * + * Envoy emits two stats for tracking the program size of regexes: the + * histogram `re2.program_size`, which records the program size, and the + * counter `re2.exceeded_warn_level`, which is incremented each time the + * program size exceeds the warn level threshold. + */ +export interface _xds_type_matcher_v3_RegexMatcher_GoogleRE2 { +} + +/** + * Google's `RE2 `_ regex engine. The regex + * string must adhere to the documented `syntax + * `_. The engine is designed to + * complete execution in linear time as well as limit the amount of memory + * used. + * + * Envoy supports program size checking via runtime. The runtime keys + * `re2.max_program_size.error_level` and `re2.max_program_size.warn_level` + * can be set to integers as the maximum program size or complexity that a + * compiled regex can have before an exception is thrown or a warning is + * logged, respectively. `re2.max_program_size.error_level` defaults to 100, + * and `re2.max_program_size.warn_level` has no default if unset (will not + * check/log a warning). + * + * Envoy emits two stats for tracking the program size of regexes: the + * histogram `re2.program_size`, which records the program size, and the + * counter `re2.exceeded_warn_level`, which is incremented each time the + * program size exceeds the warn level threshold. + */ +export interface _xds_type_matcher_v3_RegexMatcher_GoogleRE2__Output { +} + +/** + * A regex matcher designed for safety when used with untrusted input. + */ +export interface RegexMatcher { + /** + * Google's RE2 regex engine. + */ + 'google_re2'?: (_xds_type_matcher_v3_RegexMatcher_GoogleRE2 | null); + /** + * The regex match string. 
The string must be supported by the configured + * engine. + */ + 'regex'?: (string); + 'engine_type'?: "google_re2"; +} + +/** + * A regex matcher designed for safety when used with untrusted input. + */ +export interface RegexMatcher__Output { + /** + * Google's RE2 regex engine. + */ + 'google_re2'?: (_xds_type_matcher_v3_RegexMatcher_GoogleRE2__Output | null); + /** + * The regex match string. The string must be supported by the configured + * engine. + */ + 'regex': (string); + 'engine_type': "google_re2"; +} diff --git a/packages/grpc-js-xds/src/generated/envoy/type/matcher/StringMatcher.ts b/packages/grpc-js-xds/src/generated/xds/type/matcher/v3/StringMatcher.ts similarity index 58% rename from packages/grpc-js-xds/src/generated/envoy/type/matcher/StringMatcher.ts rename to packages/grpc-js-xds/src/generated/xds/type/matcher/v3/StringMatcher.ts index e434b1e1b..af2f2f56f 100644 --- a/packages/grpc-js-xds/src/generated/envoy/type/matcher/StringMatcher.ts +++ b/packages/grpc-js-xds/src/generated/xds/type/matcher/v3/StringMatcher.ts @@ -1,10 +1,10 @@ -// Original file: deps/envoy-api/envoy/type/matcher/string.proto +// Original file: deps/xds/xds/type/matcher/v3/string.proto -import type { RegexMatcher as _envoy_type_matcher_RegexMatcher, RegexMatcher__Output as _envoy_type_matcher_RegexMatcher__Output } from '../../../envoy/type/matcher/RegexMatcher'; +import type { RegexMatcher as _xds_type_matcher_v3_RegexMatcher, RegexMatcher__Output as _xds_type_matcher_v3_RegexMatcher__Output } from '../../../../xds/type/matcher/v3/RegexMatcher'; /** * Specifies the way to match a string. - * [#next-free-field: 7] + * [#next-free-field: 8] */ export interface StringMatcher { /** @@ -33,38 +33,31 @@ export interface StringMatcher { * * *abc* matches the value *xyz.abc* */ 'suffix'?: (string); - /** - * The input string must match the regular expression specified here. - * The regex grammar is defined `here - * `_. - * - * Examples: - * - * * The regex ``\d{3}`` matches the value *123* - * * The regex ``\d{3}`` does not match the value *1234* - * * The regex ``\d{3}`` does not match the value *123.456* - * - * .. attention:: - * This field has been deprecated in favor of `safe_regex` as it is not safe for use with - * untrusted input in all cases. - */ - 'regex'?: (string); /** * The input string must match the regular expression specified here. */ - 'safe_regex'?: (_envoy_type_matcher_RegexMatcher); + 'safe_regex'?: (_xds_type_matcher_v3_RegexMatcher | null); /** * If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no * effect for the safe_regex match. * For example, the matcher *data* will match both input string *Data* and *data* if set to true. */ 'ignore_case'?: (boolean); - 'match_pattern'?: "exact"|"prefix"|"suffix"|"regex"|"safe_regex"; + /** + * The input string must have the substring specified here. + * Note: empty contains match is not allowed, please use regex instead. + * + * Examples: + * + * * *abc* matches the value *xyz.abc.def* + */ + 'contains'?: (string); + 'match_pattern'?: "exact"|"prefix"|"suffix"|"safe_regex"|"contains"; } /** * Specifies the way to match a string. - * [#next-free-field: 7] + * [#next-free-field: 8] */ export interface StringMatcher__Output { /** @@ -93,31 +86,24 @@ export interface StringMatcher__Output { * * *abc* matches the value *xyz.abc* */ 'suffix'?: (string); - /** - * The input string must match the regular expression specified here. - * The regex grammar is defined `here - * `_. 
- * - * Examples: - * - * * The regex ``\d{3}`` matches the value *123* - * * The regex ``\d{3}`` does not match the value *1234* - * * The regex ``\d{3}`` does not match the value *123.456* - * - * .. attention:: - * This field has been deprecated in favor of `safe_regex` as it is not safe for use with - * untrusted input in all cases. - */ - 'regex'?: (string); /** * The input string must match the regular expression specified here. */ - 'safe_regex'?: (_envoy_type_matcher_RegexMatcher__Output); + 'safe_regex'?: (_xds_type_matcher_v3_RegexMatcher__Output | null); /** * If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no * effect for the safe_regex match. * For example, the matcher *data* will match both input string *Data* and *data* if set to true. */ 'ignore_case': (boolean); - 'match_pattern': "exact"|"prefix"|"suffix"|"regex"|"safe_regex"; + /** + * The input string must have the substring specified here. + * Note: empty contains match is not allowed, please use regex instead. + * + * Examples: + * + * * *abc* matches the value *xyz.abc.def* + */ + 'contains'?: (string); + 'match_pattern': "exact"|"prefix"|"suffix"|"safe_regex"|"contains"; } diff --git a/packages/grpc-js-xds/src/generated/xds/type/v3/TypedStruct.ts b/packages/grpc-js-xds/src/generated/xds/type/v3/TypedStruct.ts new file mode 100644 index 000000000..a0df831d2 --- /dev/null +++ b/packages/grpc-js-xds/src/generated/xds/type/v3/TypedStruct.ts @@ -0,0 +1,77 @@ +// Original file: deps/xds/xds/type/v3/typed_struct.proto + +import type { Struct as _google_protobuf_Struct, Struct__Output as _google_protobuf_Struct__Output } from '../../../google/protobuf/Struct'; + +/** + * A TypedStruct contains an arbitrary JSON serialized protocol buffer message with a URL that + * describes the type of the serialized message. This is very similar to google.protobuf.Any, + * instead of having protocol buffer binary, this employs google.protobuf.Struct as value. + * + * This message is intended to be embedded inside Any, so it shouldn't be directly referred + * from other UDPA messages. + * + * When packing an opaque extension config, packing the expected type into Any is preferred + * wherever possible for its efficiency. TypedStruct should be used only if a proto descriptor + * is not available, for example if: + * - A control plane sends opaque message that is originally from external source in human readable + * format such as JSON or YAML. + * - The control plane doesn't have the knowledge of the protocol buffer schema hence it cannot + * serialize the message in protocol buffer binary format. + * - The DPLB doesn't have have the knowledge of the protocol buffer schema its plugin or extension + * uses. This has to be indicated in the DPLB capability negotiation. + * + * When a DPLB receives a TypedStruct in Any, it should: + * - Check if the type_url of the TypedStruct matches the type the extension expects. + * - Convert value to the type described in type_url and perform validation. + * TODO(lizan): Figure out how TypeStruct should be used with DPLB extensions that doesn't link + * protobuf descriptor with DPLB itself, (e.g. gRPC LB Plugin, Envoy WASM extensions). + */ +export interface TypedStruct { + /** + * A URL that uniquely identifies the type of the serialize protocol buffer message. 
+ * This has same semantics and format described in google.protobuf.Any: + * https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/any.proto + */ + 'type_url'?: (string); + /** + * A JSON representation of the above specified type. + */ + 'value'?: (_google_protobuf_Struct | null); +} + +/** + * A TypedStruct contains an arbitrary JSON serialized protocol buffer message with a URL that + * describes the type of the serialized message. This is very similar to google.protobuf.Any, + * instead of having protocol buffer binary, this employs google.protobuf.Struct as value. + * + * This message is intended to be embedded inside Any, so it shouldn't be directly referred + * from other UDPA messages. + * + * When packing an opaque extension config, packing the expected type into Any is preferred + * wherever possible for its efficiency. TypedStruct should be used only if a proto descriptor + * is not available, for example if: + * - A control plane sends opaque message that is originally from external source in human readable + * format such as JSON or YAML. + * - The control plane doesn't have the knowledge of the protocol buffer schema hence it cannot + * serialize the message in protocol buffer binary format. + * - The DPLB doesn't have have the knowledge of the protocol buffer schema its plugin or extension + * uses. This has to be indicated in the DPLB capability negotiation. + * + * When a DPLB receives a TypedStruct in Any, it should: + * - Check if the type_url of the TypedStruct matches the type the extension expects. + * - Convert value to the type described in type_url and perform validation. + * TODO(lizan): Figure out how TypeStruct should be used with DPLB extensions that doesn't link + * protobuf descriptor with DPLB itself, (e.g. gRPC LB Plugin, Envoy WASM extensions). + */ +export interface TypedStruct__Output { + /** + * A URL that uniquely identifies the type of the serialize protocol buffer message. + * This has same semantics and format described in google.protobuf.Any: + * https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/any.proto + */ + 'type_url': (string); + /** + * A JSON representation of the above specified type. + */ + 'value': (_google_protobuf_Struct__Output | null); +} diff --git a/packages/grpc-js-xds/src/http-filter.ts b/packages/grpc-js-xds/src/http-filter.ts new file mode 100644 index 000000000..f8da5b828 --- /dev/null +++ b/packages/grpc-js-xds/src/http-filter.ts @@ -0,0 +1,246 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+// This is a non-public, unstable API, but it's very convenient
+import { loadProtosWithOptionsSync } from '@grpc/proto-loader/build/src/util';
+import { experimental, logVerbosity } from '@grpc/grpc-js';
+import { Any__Output } from './generated/google/protobuf/Any';
+import Filter = experimental.Filter;
+import FilterFactory = experimental.FilterFactory;
+import { TypedStruct__Output as TypedStruct__Output } from './generated/xds/type/v3/TypedStruct';
+import { FilterConfig__Output } from './generated/envoy/config/route/v3/FilterConfig';
+import { HttpFilter__Output } from './generated/envoy/extensions/filters/network/http_connection_manager/v3/HttpFilter';
+
+const TRACER_NAME = 'http_filter';
+
+function trace(text: string): void {
+  experimental.trace(logVerbosity.DEBUG, TRACER_NAME, text);
+}
+
+const TYPED_STRUCT_UDPA_URL = 'type.googleapis.com/udpa.type.v1.TypedStruct';
+const TYPED_STRUCT_UDPA_NAME = 'udpa.type.v1.TypedStruct';
+const TYPED_STRUCT_XDS_URL = 'type.googleapis.com/xds.type.v3.TypedStruct';
+const TYPED_STRUCT_XDS_NAME = 'xds.type.v3.TypedStruct';
+
+const FILTER_CONFIG_URL = 'type.googleapis.com/envoy.config.route.v3.FilterConfig';
+const FILTER_CONFIG_NAME = 'envoy.config.route.v3.FilterConfig';
+
+const resourceRoot = loadProtosWithOptionsSync([
+  'udpa/type/v1/typed_struct.proto',
+  'xds/type/v3/typed_struct.proto',
+  'envoy/config/route/v3/route_components.proto'], {
+    keepCase: true,
+    includeDirs: [
+      // Paths are relative to src/build
+      __dirname + '/../../deps/xds/',
+      __dirname + '/../../deps/envoy-api/',
+      __dirname + '/../../deps/protoc-gen-validate/'
+    ],
+  }
+);
+
+export interface HttpFilterConfig {
+  typeUrl: string;
+  config: any;
+}
+
+export interface HttpFilterFactoryConstructor {
+  new(config: HttpFilterConfig, overrideConfig?: HttpFilterConfig): FilterFactory<Filter>;
+}
+
+export interface HttpFilterRegistryEntry {
+  parseTopLevelFilterConfig(encodedConfig: Any__Output): HttpFilterConfig | null;
+  parseOverrideFilterConfig(encodedConfig: Any__Output): HttpFilterConfig | null;
+  httpFilterConstructor: HttpFilterFactoryConstructor;
+}
+
+const FILTER_REGISTRY = new Map<string, HttpFilterRegistryEntry>();
+
+export function registerHttpFilter(typeName: string, entry: HttpFilterRegistryEntry) {
+  trace('Registered filter with type URL ' + typeName);
+  FILTER_REGISTRY.set(typeName, entry);
+}
+
+const toObjectOptions = {
+  longs: String,
+  enums: String,
+  defaults: true,
+  oneofs: true
+}
+
+function parseAnyMessage<MessageType>(message: Any__Output): MessageType | null {
+  const typeName = message.type_url.substring(message.type_url.lastIndexOf('/') + 1);
+  const messageType = resourceRoot.lookup(typeName);
+  if (messageType) {
+    const decodedMessage = (messageType as any).decode(message.value);
+    return decodedMessage.$type.toObject(decodedMessage, toObjectOptions) as MessageType;
+  } else {
+    return null;
+  }
+}
+
+export function getTopLevelFilterUrl(encodedConfig: Any__Output): string {
+  let typeUrl: string;
+  if (encodedConfig.type_url === TYPED_STRUCT_UDPA_URL || encodedConfig.type_url === TYPED_STRUCT_XDS_URL) {
+    const typedStruct = parseAnyMessage<TypedStruct__Output>(encodedConfig)
+    if (typedStruct) {
+      return typedStruct.type_url;
+    } else {
+      throw new Error('Failed to parse TypedStruct');
+    }
+  } else {
+    return encodedConfig.type_url;
+  }
+}
+
+export function validateTopLevelFilter(httpFilter: HttpFilter__Output): boolean {
+  if (!httpFilter.typed_config) {
+    trace(httpFilter.name + ' validation failed: typed_config unset');
+    return false;
+  }
+  const encodedConfig = httpFilter.typed_config;
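+  /* Determine the underlying filter type URL, unwrapping a udpa.type.v1.TypedStruct
+   * or xds.type.v3.TypedStruct wrapper if the config is packed that way. */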
let typeUrl: string;
+  try {
+    typeUrl = getTopLevelFilterUrl(encodedConfig);
+  } catch (e) {
+    trace(httpFilter.name + ' validation failed with error ' + (e as Error).message);
+    return false;
+  }
+  const registryEntry = FILTER_REGISTRY.get(typeUrl);
+  if (registryEntry) {
+    const parsedConfig = registryEntry.parseTopLevelFilterConfig(encodedConfig);
+    if (parsedConfig === null) {
+      trace(httpFilter.name + ' validation failed: config parsing failed');
+    }
+    return parsedConfig !== null;
+  } else {
+    if (httpFilter.is_optional) {
+      return true;
+    } else {
+      trace(httpFilter.name + ' validation failed: filter is not optional and registry does not contain type URL ' + typeUrl);
+      return false;
+    }
+  }
+}
+
+export function validateOverrideFilter(encodedConfig: Any__Output): boolean {
+  let typeUrl: string;
+  let realConfig: Any__Output;
+  let isOptional = false;
+  if (encodedConfig.type_url === FILTER_CONFIG_URL) {
+    const filterConfig = parseAnyMessage<FilterConfig__Output>(encodedConfig);
+    if (filterConfig) {
+      isOptional = filterConfig.is_optional;
+      if (filterConfig.config) {
+        realConfig = filterConfig.config;
+      } else {
+        trace('Override filter validation failed: FilterConfig config field is empty');
+        return false;
+      }
+    } else {
+      trace('Override filter validation failed: failed to parse FilterConfig message');
+      return false;
+    }
+  } else {
+    realConfig = encodedConfig;
+  }
+  if (realConfig.type_url === TYPED_STRUCT_UDPA_URL || realConfig.type_url === TYPED_STRUCT_XDS_URL) {
+    const typedStruct = parseAnyMessage<TypedStruct__Output>(encodedConfig);
+    if (typedStruct) {
+      typeUrl = typedStruct.type_url;
+    } else {
+      trace('Override filter validation failed: failed to parse TypedStruct message');
+      return false;
+    }
+  } else {
+    typeUrl = realConfig.type_url;
+  }
+  const registryEntry = FILTER_REGISTRY.get(typeUrl);
+  if (registryEntry) {
+    const parsedConfig = registryEntry.parseOverrideFilterConfig(encodedConfig);
+    if (parsedConfig === null) {
+      trace('Override filter validation failed: config parsing failed.
Type URL: ' + typeUrl);
+    }
+    return parsedConfig !== null;
+  } else {
+    if (isOptional) {
+      return true;
+    } else {
+      trace('Override filter validation failed: filter is not optional and registry does not contain type URL ' + typeUrl);
+      return false;
+    }
+  }
+}
+
+export function parseTopLevelFilterConfig(encodedConfig: Any__Output) {
+  let typeUrl: string;
+  try {
+    typeUrl = getTopLevelFilterUrl(encodedConfig);
+  } catch (e) {
+    return null;
+  }
+  const registryEntry = FILTER_REGISTRY.get(typeUrl);
+  if (registryEntry) {
+    return registryEntry.parseTopLevelFilterConfig(encodedConfig);
+  } else {
+    // Filter type URL not found in registry
+    return null;
+  }
+}
+
+export function parseOverrideFilterConfig(encodedConfig: Any__Output) {
+  let typeUrl: string;
+  let realConfig: Any__Output;
+  if (encodedConfig.type_url === FILTER_CONFIG_URL) {
+    const filterConfig = parseAnyMessage<FilterConfig__Output>(encodedConfig);
+    if (filterConfig) {
+      if (filterConfig.config) {
+        realConfig = filterConfig.config;
+      } else {
+        return null;
+      }
+    } else {
+      return null;
+    }
+  } else {
+    realConfig = encodedConfig;
+  }
+  if (realConfig.type_url === TYPED_STRUCT_UDPA_URL || realConfig.type_url === TYPED_STRUCT_XDS_URL) {
+    const typedStruct = parseAnyMessage<TypedStruct__Output>(encodedConfig);
+    if (typedStruct) {
+      typeUrl = typedStruct.type_url;
+    } else {
+      return null;
+    }
+  } else {
+    typeUrl = realConfig.type_url;
+  }
+  const registryEntry = FILTER_REGISTRY.get(typeUrl);
+  if (registryEntry) {
+    return registryEntry.parseOverrideFilterConfig(encodedConfig);
+  } else {
+    return null;
+  }
+}
+
+export function createHttpFilter(config: HttpFilterConfig, overrideConfig?: HttpFilterConfig): FilterFactory<Filter> | null {
+  const registryEntry = FILTER_REGISTRY.get(config.typeUrl);
+  if (registryEntry) {
+    return new registryEntry.httpFilterConstructor(config, overrideConfig);
+  } else {
+    return null;
+  }
+}
diff --git a/packages/grpc-js-xds/src/http-filter/fault-injection-filter.ts b/packages/grpc-js-xds/src/http-filter/fault-injection-filter.ts
new file mode 100644
index 000000000..b02dfbc80
--- /dev/null
+++ b/packages/grpc-js-xds/src/http-filter/fault-injection-filter.ts
@@ -0,0 +1,347 @@
+/*
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This is a non-public, unstable API, but it's very convenient
+import { loadProtosWithOptionsSync } from '@grpc/proto-loader/build/src/util';
+import { experimental, logVerbosity, Metadata, status } from '@grpc/grpc-js';
+import { Any__Output } from '../generated/google/protobuf/Any';
+import Filter = experimental.Filter;
+import FilterFactory = experimental.FilterFactory;
+import BaseFilter = experimental.BaseFilter;
+import CallStream = experimental.CallStream;
+import { HttpFilterConfig, registerHttpFilter } from '../http-filter';
+import { HTTPFault__Output } from '../generated/envoy/extensions/filters/http/fault/v3/HTTPFault';
+import { envoyFractionToFraction, Fraction } from '../fraction';
+import { Duration__Output } from '../generated/google/protobuf/Duration';
+
+const TRACER_NAME = 'fault_injection';
+
+function trace(text: string): void {
+  experimental.trace(logVerbosity.DEBUG, TRACER_NAME, text);
+}
+
+const resourceRoot = loadProtosWithOptionsSync([
+  'envoy/extensions/filters/http/fault/v3/fault.proto'], {
+    keepCase: true,
+    includeDirs: [
+      // Paths are relative to src/build/http-filter
+      __dirname + '/../../../deps/xds/',
+      __dirname + '/../../../deps/envoy-api/',
+      __dirname + '/../../../deps/protoc-gen-validate/'
+    ],
+  }
+);
+
+interface FixedDelayConfig {
+  kind: 'fixed';
+  durationMs: number;
+  percentage: Fraction;
+}
+
+interface HeaderDelayConfig {
+  kind: 'header';
+  percentage: Fraction;
+}
+
+interface GrpcAbortConfig {
+  kind: 'grpc';
+  code: status;
+  percentage: Fraction;
+}
+
+interface HeaderAbortConfig {
+  kind: 'header';
+  percentage: Fraction;
+}
+
+interface FaultInjectionConfig {
+  delay: FixedDelayConfig | HeaderDelayConfig | null;
+  abort: GrpcAbortConfig | HeaderAbortConfig | null;
+  maxActiveFaults: number;
+}
+
+interface FaultInjectionFilterConfig extends HttpFilterConfig {
+  typeUrl: 'type.googleapis.com/envoy.extensions.filters.http.fault.v3.HTTPFault';
+  config: FaultInjectionConfig;
+}
+
+const FAULT_INJECTION_FILTER_URL = 'type.googleapis.com/envoy.extensions.filters.http.fault.v3.HTTPFault';
+
+const toObjectOptions = {
+  longs: String,
+  enums: String,
+  defaults: true,
+  oneofs: true
+}
+
+function parseAnyMessage<MessageType>(message: Any__Output): MessageType | null {
+  const typeName = message.type_url.substring(message.type_url.lastIndexOf('/') + 1);
+  const messageType = resourceRoot.lookup(typeName);
+  if (messageType) {
+    const decodedMessage = (messageType as any).decode(message.value);
+    return decodedMessage.$type.toObject(decodedMessage, toObjectOptions) as MessageType;
+  } else {
+    return null;
+  }
+}
+
+function durationToMs(duration: Duration__Output): number {
+  return Number.parseInt(duration.seconds) * 1000 + duration.nanos / 1_000_000;
+}
+
+function httpCodeToGrpcStatus(code: number): status {
+  switch (code) {
+    case 400: return status.INTERNAL;
+    case 401: return status.UNAUTHENTICATED;
+    case 403: return status.PERMISSION_DENIED;
+    case 404: return status.UNIMPLEMENTED;
+    case 429: return status.UNAVAILABLE;
+    case 502: return status.UNAVAILABLE;
+    case 503: return status.UNAVAILABLE;
+    case 504: return status.UNAVAILABLE;
+    default: return status.UNKNOWN;
+  }
+}
+
+function parseHTTPFaultConfig(encodedConfig: Any__Output): FaultInjectionFilterConfig | null {
+  if (encodedConfig.type_url !== FAULT_INJECTION_FILTER_URL) {
+    trace('Config parsing failed: unexpected type URL: ' + encodedConfig.type_url);
+    return null;
+  }
+  const parsedMessage = parseAnyMessage<HTTPFault__Output>(encodedConfig);
+  if (parsedMessage === null) {
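+    // parseAnyMessage returns null when the HTTPFault message type cannot be
+    // found in the loaded proto definitions.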
trace('Config parsing failed: failed to parse HTTPFault message');
+    return null;
+  }
+  trace('Parsing HTTPFault message ' + JSON.stringify(parsedMessage, undefined, 2));
+  const result: FaultInjectionConfig = {
+    delay: null,
+    abort: null,
+    maxActiveFaults: Infinity
+  };
+  // Parse delay field
+  if (parsedMessage.delay !== null) {
+    if (parsedMessage.delay.percentage === null) {
+      trace('Config parsing failed: delay.percentage unset');
+      return null;
+    }
+    const percentage = envoyFractionToFraction(parsedMessage.delay.percentage);
+    switch (parsedMessage.delay.fault_delay_secifier /* sic */) {
+      case 'fixed_delay':
+        result.delay = {
+          kind: 'fixed',
+          durationMs: durationToMs(parsedMessage.delay.fixed_delay!),
+          percentage: percentage
+        };
+        break;
+      case 'header_delay':
+        result.delay = {
+          kind: 'header',
+          percentage: percentage
+        };
+        break;
+      default:
+        trace('Config parsing failed: delay.fault_delay_secifier has unexpected value ' + parsedMessage.delay.fault_delay_secifier);
+        // Should not be possible
+        return null;
+    }
+  }
+  // Parse abort field
+  if (parsedMessage.abort !== null) {
+    if (parsedMessage.abort.percentage === null) {
+      trace('Config parsing failed: abort.percentage unset');
+      return null;
+    }
+    const percentage = envoyFractionToFraction(parsedMessage.abort.percentage);
+    switch (parsedMessage.abort.error_type) {
+      case 'http_status':
+        result.abort = {
+          kind: 'grpc',
+          code: httpCodeToGrpcStatus(parsedMessage.abort.http_status!),
+          percentage: percentage
+        };
+        break;
+      case 'grpc_status':
+        result.abort = {
+          kind: 'grpc',
+          code: parsedMessage.abort.grpc_status!,
+          percentage: percentage
+        }
+        break;
+      case 'header_abort':
+        result.abort = {
+          kind: 'header',
+          percentage: percentage
+        };
+        break;
+      default:
+        trace('Config parsing failed: abort.error_type has unexpected value ' + parsedMessage.abort.error_type);
+        // Should not be possible
+        return null;
+    }
+  }
+  // Parse max_active_faults field
+  if (parsedMessage.max_active_faults !== null) {
+    result.maxActiveFaults = parsedMessage.max_active_faults.value;
+  }
+  return {
+    typeUrl: FAULT_INJECTION_FILTER_URL,
+    config: result
+  };
+}
+
+function asyncTimeout(timeMs: number): Promise<void> {
+  return new Promise<void>((resolve, reject) => {
+    setTimeout(() => {
+      resolve();
+    }, timeMs);
+  });
+}
+
+/**
+ * Returns true with probability numerator/denominator.
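+ * For example, rollRandomPercentage(20, 100) returns true with probability 0.2.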
+ * @param numerator
+ * @param denominator
+ */
+function rollRandomPercentage(numerator: number, denominator: number): boolean {
+  return Math.random() * denominator < numerator;
+}
+
+const DELAY_DURATION_HEADER_KEY = 'x-envoy-fault-delay-request';
+const DELAY_PERCENTAGE_HEADER_KEY = 'x-envoy-fault-delay-request-percentage';
+const ABORT_GRPC_HEADER_KEY = 'x-envoy-fault-abort-grpc-request';
+const ABORT_HTTP_HEADER_KEY = 'x-envoy-fault-abort-request';
+const ABORT_PERCENTAGE_HEADER_KEY = 'x-envoy-fault-abort-request-percentage';
+
+const NUMBER_REGEX = /\d+/;
+
+let totalActiveFaults = 0;
+
+class FaultInjectionFilter extends BaseFilter implements Filter {
+  constructor(private config: FaultInjectionConfig) {
+    super();
+  }
+
+  async sendMetadata(metadataPromise: Promise<Metadata>): Promise<Metadata> {
+    const metadata = await metadataPromise;
+    // Handle delay
+    if (totalActiveFaults < this.config.maxActiveFaults && this.config.delay) {
+      let duration = 0;
+      let numerator = this.config.delay.percentage.numerator;
+      const denominator = this.config.delay.percentage.denominator;
+      if (this.config.delay.kind === 'fixed') {
+        duration = this.config.delay.durationMs;
+      } else {
+        const durationHeader = metadata.get(DELAY_DURATION_HEADER_KEY);
+        for (const value of durationHeader) {
+          if (typeof value !== 'string') {
+            continue;
+          }
+          if (NUMBER_REGEX.test(value)) {
+            duration = Number.parseInt(value);
+            break;
+          }
+        }
+        const percentageHeader = metadata.get(DELAY_PERCENTAGE_HEADER_KEY);
+        for (const value of percentageHeader) {
+          if (typeof value !== 'string') {
+            continue;
+          }
+          if (NUMBER_REGEX.test(value)) {
+            numerator = Math.min(numerator, Number.parseInt(value));
+            break;
+          }
+        }
+      }
+      if (rollRandomPercentage(numerator, denominator)) {
+        totalActiveFaults++;
+        await asyncTimeout(duration);
+        totalActiveFaults--;
+      }
+    }
+    // Handle abort
+    if (totalActiveFaults < this.config.maxActiveFaults && this.config.abort) {
+      let abortStatus: status | null = null;
+      let numerator = this.config.abort.percentage.numerator;
+      const denominator = this.config.abort.percentage.denominator;
+      if (this.config.abort.kind === 'grpc') {
+        abortStatus = this.config.abort.code;
+      } else {
+        const grpcStatusHeader = metadata.get(ABORT_GRPC_HEADER_KEY);
+        for (const value of grpcStatusHeader) {
+          if (typeof value !== 'string') {
+            continue;
+          }
+          if (NUMBER_REGEX.test(value)) {
+            abortStatus = Number.parseInt(value);
+            break;
+          }
+        }
+        /* Fall back to looking for HTTP status header if the gRPC status
+         * header is not present.
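+         * Any HTTP status code found there is translated to a gRPC status code
+         * with httpCodeToGrpcStatus.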
*/
+        if (abortStatus === null) {
+          const httpStatusHeader = metadata.get(ABORT_HTTP_HEADER_KEY);
+          for (const value of httpStatusHeader) {
+            if (typeof value !== 'string') {
+              continue;
+            }
+            if (NUMBER_REGEX.test(value)) {
+              abortStatus = httpCodeToGrpcStatus(Number.parseInt(value));
+              break;
+            }
+          }
+        }
+        const percentageHeader = metadata.get(ABORT_PERCENTAGE_HEADER_KEY);
+        for (const value of percentageHeader) {
+          if (typeof value !== 'string') {
+            continue;
+          }
+          if (NUMBER_REGEX.test(value)) {
+            numerator = Math.min(numerator, Number.parseInt(value));
+            break;
+          }
+        }
+      }
+      if (abortStatus !== null && rollRandomPercentage(numerator, denominator)) {
+        return Promise.reject({code: abortStatus, details: 'Fault injected', metadata: new Metadata()});
+      }
+    }
+    return metadata;
+  }
+}
+
+class FaultInjectionFilterFactory implements FilterFactory<FaultInjectionFilter> {
+  private config: FaultInjectionConfig;
+  constructor(config: HttpFilterConfig, overrideConfig?: HttpFilterConfig) {
+    if (overrideConfig?.typeUrl === FAULT_INJECTION_FILTER_URL) {
+      this.config = overrideConfig.config;
+    } else {
+      this.config = config.config;
+    }
+  }
+
+  createFilter(): FaultInjectionFilter {
+    return new FaultInjectionFilter(this.config);
+  }
+}
+
+export function setup() {
+  registerHttpFilter(FAULT_INJECTION_FILTER_URL, {
+    parseTopLevelFilterConfig: parseHTTPFaultConfig,
+    parseOverrideFilterConfig: parseHTTPFaultConfig,
+    httpFilterConstructor: FaultInjectionFilterFactory
+  });
+}
\ No newline at end of file
diff --git a/packages/grpc-js-xds/src/http-filter/router-filter.ts b/packages/grpc-js-xds/src/http-filter/router-filter.ts
new file mode 100644
index 000000000..172a08740
--- /dev/null
+++ b/packages/grpc-js-xds/src/http-filter/router-filter.ts
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { experimental } from '@grpc/grpc-js';
+import { Any__Output } from '../generated/google/protobuf/Any';
+import { HttpFilterConfig, registerHttpFilter } from '../http-filter';
+import Filter = experimental.Filter;
+import FilterFactory = experimental.FilterFactory;
+import BaseFilter = experimental.BaseFilter;
+
+class RouterFilter extends BaseFilter implements Filter {}
+
+class RouterFilterFactory implements FilterFactory<RouterFilter> {
+  constructor(config: HttpFilterConfig, overrideConfig?: HttpFilterConfig) {}
+
+  createFilter(): RouterFilter {
+    return new RouterFilter();
+  }
+}
+
+const ROUTER_FILTER_URL = 'type.googleapis.com/envoy.extensions.filters.http.router.v3.Router';
+
+function parseConfig(encodedConfig: Any__Output): HttpFilterConfig | null {
+  return {
+    typeUrl: ROUTER_FILTER_URL,
+    config: null
+  };
+}
+
+export function setup() {
+  registerHttpFilter(ROUTER_FILTER_URL, {
+    parseTopLevelFilterConfig: parseConfig,
+    parseOverrideFilterConfig: parseConfig,
+    httpFilterConstructor: RouterFilterFactory
+  });
+}
\ No newline at end of file
diff --git a/packages/grpc-js-xds/src/index.ts b/packages/grpc-js-xds/src/index.ts
index 1b24d25e6..aa603c9b7 100644
--- a/packages/grpc-js-xds/src/index.ts
+++ b/packages/grpc-js-xds/src/index.ts
@@ -17,11 +17,19 @@
 
 import * as resolver_xds from './resolver-xds';
 import * as load_balancer_cds from './load-balancer-cds';
-import * as load_balancer_eds from './load-balancer-eds';
-import * as load_balancer_lrs from './load-balancer-lrs';
+import * as xds_cluster_resolver from './load-balancer-xds-cluster-resolver';
+import * as xds_cluster_impl from './load-balancer-xds-cluster-impl';
 import * as load_balancer_priority from './load-balancer-priority';
 import * as load_balancer_weighted_target from './load-balancer-weighted-target';
 import * as load_balancer_xds_cluster_manager from './load-balancer-xds-cluster-manager';
+import * as xds_wrr_locality from './load-balancer-xds-wrr-locality';
+import * as ring_hash from './load-balancer-ring-hash';
+import * as router_filter from './http-filter/router-filter';
+import * as fault_injection_filter from './http-filter/fault-injection-filter';
+import * as csds from './csds';
+import * as round_robin_lb from './lb-policy-registry/round-robin';
+import * as typed_struct_lb from './lb-policy-registry/typed-struct';
+import * as pick_first_lb from './lb-policy-registry/pick-first';
 
 /**
  * Register the "xds:" name scheme with the @grpc/grpc-js library.
@@ -29,9 +37,17 @@ import * as load_balancer_xds_cluster_manager from './load-balancer-xds-cluster-
 export function register() {
   resolver_xds.setup();
   load_balancer_cds.setup();
-  load_balancer_eds.setup();
-  load_balancer_lrs.setup();
+  xds_cluster_resolver.setup();
+  xds_cluster_impl.setup();
   load_balancer_priority.setup();
   load_balancer_weighted_target.setup();
   load_balancer_xds_cluster_manager.setup();
-}
\ No newline at end of file
+  xds_wrr_locality.setup();
+  ring_hash.setup();
+  router_filter.setup();
+  fault_injection_filter.setup();
+  csds.setup();
+  round_robin_lb.setup();
+  typed_struct_lb.setup();
+  pick_first_lb.setup();
+}
diff --git a/packages/grpc-js-xds/src/lb-policy-registry.ts b/packages/grpc-js-xds/src/lb-policy-registry.ts
new file mode 100644
index 000000000..f7aa74254
--- /dev/null
+++ b/packages/grpc-js-xds/src/lb-policy-registry.ts
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// https://github.com/grpc/proposal/blob/master/A52-xds-custom-lb-policies.md + +import { LoadBalancingConfig, experimental, logVerbosity } from "@grpc/grpc-js"; +import { LoadBalancingPolicy__Output } from "./generated/envoy/config/cluster/v3/LoadBalancingPolicy"; +import { TypedExtensionConfig__Output } from "./generated/envoy/config/core/v3/TypedExtensionConfig"; + +const TRACER_NAME = 'lb_policy_registry'; +function trace(text: string) { + experimental.trace(logVerbosity.DEBUG, TRACER_NAME, text); +} + +const MAX_RECURSION_DEPTH = 16; + +/** + * Parse a protoPolicy to a LoadBalancingConfig. A null return value indicates + * that parsing failed, but that it should not be treated as an error, and + * instead the next policy should be used. + */ +interface ProtoLbPolicyConverter { + (protoPolicy: TypedExtensionConfig__Output, selectChildPolicy: (childPolicy: LoadBalancingPolicy__Output) => LoadBalancingConfig): LoadBalancingConfig | null; +} + +interface RegisteredLbPolicy { + convertToLoadBalancingPolicy: ProtoLbPolicyConverter; +} + +const registry: {[typeUrl: string]: RegisteredLbPolicy} = {} + +export function registerLbPolicy(typeUrl: string, converter: ProtoLbPolicyConverter) { + registry[typeUrl] = {convertToLoadBalancingPolicy: converter}; +} + +export function convertToLoadBalancingConfig(protoPolicy: LoadBalancingPolicy__Output, recursionDepth = 0): LoadBalancingConfig { + trace('Registry entries: [' + Object.keys(registry) + ']'); + if (recursionDepth > MAX_RECURSION_DEPTH) { + throw new Error(`convertToLoadBalancingConfig: Max recursion depth ${MAX_RECURSION_DEPTH} reached`); + } + for (const policyCandidate of protoPolicy.policies) { + trace('Attempting to parse config ' + JSON.stringify(policyCandidate)); + const extensionConfig = policyCandidate.typed_extension_config; + if (!extensionConfig?.typed_config) { + continue; + } + const typeUrl = extensionConfig.typed_config.type_url; + trace('Attempting to parse config with type_url=' + typeUrl); + let parseResult: LoadBalancingConfig | null; + if (typeUrl in registry) { + try { + parseResult = registry[typeUrl].convertToLoadBalancingPolicy(extensionConfig, childPolicy => convertToLoadBalancingConfig(childPolicy, recursionDepth + 1)); + } catch (e) { + throw new Error(`Error parsing ${typeUrl} LoadBalancingPolicy named ${extensionConfig.name}: ${(e as Error).message}`); + } + if (parseResult) { + return parseResult; + } else { + continue; + } + } + } + throw new Error('No registered LB policy found in list'); +} diff --git a/packages/grpc-js-xds/src/lb-policy-registry/pick-first.ts b/packages/grpc-js-xds/src/lb-policy-registry/pick-first.ts new file mode 100644 index 000000000..bfe2793d8 --- /dev/null +++ b/packages/grpc-js-xds/src/lb-policy-registry/pick-first.ts @@ -0,0 +1,77 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// https://github.com/grpc/proposal/blob/master/A62-pick-first.md#pick_first-via-xds-1 + +import { LoadBalancingConfig } from "@grpc/grpc-js"; +import { LoadBalancingPolicy__Output } from "../generated/envoy/config/cluster/v3/LoadBalancingPolicy"; +import { TypedExtensionConfig__Output } from "../generated/envoy/config/core/v3/TypedExtensionConfig"; +import { loadProtosWithOptionsSync } from "@grpc/proto-loader/build/src/util"; +import { Any__Output } from "../generated/google/protobuf/Any"; +import { PickFirst__Output } from "../generated/envoy/extensions/load_balancing_policies/pick_first/v3/PickFirst"; +import { EXPERIMENTAL_PICK_FIRST } from "../environment"; +import { registerLbPolicy } from "../lb-policy-registry"; + +const PICK_FIRST_TYPE_URL = 'type.googleapis.com/envoy.extensions.load_balancing_policies.pick_first.v3.PickFirst'; + +const resourceRoot = loadProtosWithOptionsSync([ + 'envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.proto'], { + keepCase: true, + includeDirs: [ + // Paths are relative to src/build/lb-policy-registry + __dirname + '/../../../deps/envoy-api/', + __dirname + '/../../../deps/xds/', + __dirname + '/../../../deps/protoc-gen-validate' + ], + } +); + +const toObjectOptions = { + longs: String, + enums: String, + defaults: true, + oneofs: true +} + +function decodePickFirstConfig(message: Any__Output): PickFirst__Output { + const name = message.type_url.substring(message.type_url.lastIndexOf('/') + 1); + const type = resourceRoot.lookup(name); + if (type) { + const decodedMessage = (type as any).decode(message.value); + return decodedMessage.$type.toObject(decodedMessage, toObjectOptions) as PickFirst__Output; + } else { + throw new Error(`TypedStruct parsing error: unexpected type URL ${message.type_url}`); + } +} + +function convertToLoadBalancingPolicy(protoPolicy: TypedExtensionConfig__Output, selectChildPolicy: (childPolicy: LoadBalancingPolicy__Output) => LoadBalancingConfig): LoadBalancingConfig | null { + if (protoPolicy.typed_config?.type_url !== PICK_FIRST_TYPE_URL) { + throw new Error(`Pick first LB policy parsing error: unexpected type URL ${protoPolicy.typed_config?.type_url}`); + } + const pickFirstMessage = decodePickFirstConfig(protoPolicy.typed_config); + return { + pick_first: { + shuffleAddressList: pickFirstMessage.shuffle_address_list + } + }; +} + +export function setup() { + if (EXPERIMENTAL_PICK_FIRST) { + registerLbPolicy(PICK_FIRST_TYPE_URL, convertToLoadBalancingPolicy); + } +} diff --git a/packages/grpc-js-xds/src/lb-policy-registry/round-robin.ts b/packages/grpc-js-xds/src/lb-policy-registry/round-robin.ts new file mode 100644 index 000000000..b43b1c9e3 --- /dev/null +++ b/packages/grpc-js-xds/src/lb-policy-registry/round-robin.ts @@ -0,0 +1,38 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// https://github.com/grpc/proposal/blob/master/A52-xds-custom-lb-policies.md + +import { LoadBalancingConfig } from "@grpc/grpc-js"; +import { LoadBalancingPolicy__Output } from "../generated/envoy/config/cluster/v3/LoadBalancingPolicy"; +import { TypedExtensionConfig__Output } from "../generated/envoy/config/core/v3/TypedExtensionConfig"; +import { registerLbPolicy } from "../lb-policy-registry"; + +const ROUND_ROBIN_TYPE_URL = 'type.googleapis.com/envoy.extensions.load_balancing_policies.round_robin.v3.RoundRobin'; + +function convertToLoadBalancingPolicy(protoPolicy: TypedExtensionConfig__Output, selectChildPolicy: (childPolicy: LoadBalancingPolicy__Output) => LoadBalancingConfig): LoadBalancingConfig { + if (protoPolicy.typed_config?.type_url !== ROUND_ROBIN_TYPE_URL) { + throw new Error(`Round robin LB policy parsing error: unexpected type URL ${protoPolicy.typed_config?.type_url}`); + } + return { + round_robin: {} + }; +} + +export function setup() { + registerLbPolicy(ROUND_ROBIN_TYPE_URL, convertToLoadBalancingPolicy); +} diff --git a/packages/grpc-js-xds/src/lb-policy-registry/typed-struct.ts b/packages/grpc-js-xds/src/lb-policy-registry/typed-struct.ts new file mode 100644 index 000000000..b310782d7 --- /dev/null +++ b/packages/grpc-js-xds/src/lb-policy-registry/typed-struct.ts @@ -0,0 +1,116 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +// https://github.com/grpc/proposal/blob/master/A52-xds-custom-lb-policies.md + +import { LoadBalancingConfig, experimental } from "@grpc/grpc-js"; +import { LoadBalancingPolicy__Output } from "../generated/envoy/config/cluster/v3/LoadBalancingPolicy"; +import { TypedExtensionConfig__Output } from "../generated/envoy/config/core/v3/TypedExtensionConfig"; +import { registerLbPolicy } from "../lb-policy-registry"; +import { loadProtosWithOptionsSync } from "@grpc/proto-loader/build/src/util"; +import { Any__Output } from "../generated/google/protobuf/Any"; +import { Struct__Output } from "../generated/google/protobuf/Struct"; +import { Value__Output } from "../generated/google/protobuf/Value"; +import { TypedStruct__Output } from "../generated/xds/type/v3/TypedStruct"; + +const XDS_TYPED_STRUCT_TYPE_URL = 'type.googleapis.com/xds.type.v3.TypedStruct'; +const UDPA_TYPED_STRUCT_TYPE_URL = 'type.googleapis.com/udpa.type.v1.TypedStruct'; + +const resourceRoot = loadProtosWithOptionsSync([ + 'xds/type/v3/typed_struct.proto', + 'udpa/type/v1/typed_struct.proto'], { + keepCase: true, + includeDirs: [ + // Paths are relative to src/build/lb-policy-registry + __dirname + '/../../../deps/xds/', + __dirname + '/../../../deps/protoc-gen-validate' + ], + } +); + +const toObjectOptions = { + longs: String, + enums: String, + defaults: true, + oneofs: true +} + +/* xds.type.v3.TypedStruct and udpa.type.v1.TypedStruct have identical interfaces */ +function decodeTypedStruct(message: Any__Output): TypedStruct__Output { + const name = message.type_url.substring(message.type_url.lastIndexOf('/') + 1); + const type = resourceRoot.lookup(name); + if (type) { + const decodedMessage = (type as any).decode(message.value); + return decodedMessage.$type.toObject(decodedMessage, toObjectOptions) as TypedStruct__Output; + } else { + throw new Error(`TypedStruct parsing error: unexpected type URL ${message.type_url}`); + } +} + +type FlatValue = boolean | null | number | string | FlatValue[] | FlatStruct; +interface FlatStruct { + [key: string]: FlatValue; +} + +function flattenValue(value: Value__Output): FlatValue { + switch (value.kind) { + case 'boolValue': + return value.boolValue!; + case 'listValue': + return value.listValue!.values.map(flattenValue); + case 'nullValue': + return null; + case 'numberValue': + return value.numberValue!; + case 'stringValue': + return value.stringValue!; + case 'structValue': + return flattenStruct(value.structValue!); + default: + throw new Error(`Struct parsing error: unexpected value kind ${value.kind}`); + } +} + +function flattenStruct(struct: Struct__Output): FlatStruct { + const result: FlatStruct = {}; + for (const [key, value] of Object.entries(struct.fields)) { + result[key] = flattenValue(value); + } + return result; +} + +function convertToLoadBalancingPolicy(protoPolicy: TypedExtensionConfig__Output, selectChildPolicy: (childPolicy: LoadBalancingPolicy__Output) => LoadBalancingConfig): LoadBalancingConfig | null { + if (protoPolicy.typed_config?.type_url !== XDS_TYPED_STRUCT_TYPE_URL && protoPolicy.typed_config?.type_url !== UDPA_TYPED_STRUCT_TYPE_URL) { + throw new Error(`Typed struct LB policy parsing error: unexpected type URL ${protoPolicy.typed_config?.type_url}`); + } + const typedStruct = decodeTypedStruct(protoPolicy.typed_config); + if (!typedStruct.value) { + throw new Error(`Typed struct LB parsing error: unexpected value ${typedStruct.value}`); + } + const policyName = typedStruct.type_url.substring(typedStruct.type_url.lastIndexOf('/') + 1); + 
if (!experimental.isLoadBalancerNameRegistered(policyName)) {
+    return null;
+  }
+  return {
+    [policyName]: flattenStruct(typedStruct.value)
+  };
+}
+
+export function setup() {
+  registerLbPolicy(XDS_TYPED_STRUCT_TYPE_URL, convertToLoadBalancingPolicy);
+  registerLbPolicy(UDPA_TYPED_STRUCT_TYPE_URL, convertToLoadBalancingPolicy);
+}
diff --git a/packages/grpc-js-xds/src/load-balancer-cds.ts b/packages/grpc-js-xds/src/load-balancer-cds.ts
index d0fe2338a..bebca6fe3 100644
--- a/packages/grpc-js-xds/src/load-balancer-cds.ts
+++ b/packages/grpc-js-xds/src/load-balancer-cds.ts
@@ -15,18 +15,20 @@
  *
  */
 
-import { connectivityState, status, Metadata, logVerbosity, experimental } from '@grpc/grpc-js';
-import { getSingletonXdsClient, XdsClient } from './xds-client';
-import { Cluster__Output } from './generated/envoy/api/v2/Cluster';
-import SubchannelAddress = experimental.SubchannelAddress;
+import { connectivityState, status, Metadata, logVerbosity, experimental, LoadBalancingConfig, ChannelOptions } from '@grpc/grpc-js';
+import { getSingletonXdsClient, Watcher, XdsClient } from './xds-client';
+import { Cluster__Output } from './generated/envoy/config/cluster/v3/Cluster';
+import Endpoint = experimental.Endpoint;
 import UnavailablePicker = experimental.UnavailablePicker;
 import ChildLoadBalancerHandler = experimental.ChildLoadBalancerHandler;
 import LoadBalancer = experimental.LoadBalancer;
 import ChannelControlHelper = experimental.ChannelControlHelper;
 import registerLoadBalancerType = experimental.registerLoadBalancerType;
-import LoadBalancingConfig = experimental.LoadBalancingConfig;
-import { EdsLoadBalancingConfig } from './load-balancer-eds';
-import { Watcher } from './xds-stream-state/xds-stream-state';
+import TypedLoadBalancingConfig = experimental.TypedLoadBalancingConfig;
+import QueuePicker = experimental.QueuePicker;
+import parseLoadBalancingConfig = experimental.parseLoadBalancingConfig;
+import { DiscoveryMechanism, XdsClusterResolverChildPolicyHandler } from './load-balancer-xds-cluster-resolver';
+import { CdsUpdate, ClusterResourceType } from './xds-resource-type/cluster-resource-type';
 
 const TRACER_NAME = 'cds_balancer';
 
@@ -36,7 +38,7 @@ function trace(text: string): void {
 
 const TYPE_NAME = 'cds';
 
-export class CdsLoadBalancingConfig implements LoadBalancingConfig {
+class CdsLoadBalancingConfig implements TypedLoadBalancingConfig {
   getLoadBalancerName(): string {
     return TYPE_NAME;
   }
@@ -56,65 +58,201 @@ export class CdsLoadBalancingConfig implements LoadBalancingConfig {
   }
 
   static createFromJson(obj: any): CdsLoadBalancingConfig {
-    if ('cluster' in obj) {
-      return new CdsLoadBalancingConfig(obj.cluster);
+    if (!('cluster' in obj && typeof obj.cluster === 'string')) {
+      throw new Error('cds config must have a string field cluster');
+    }
+    return new CdsLoadBalancingConfig(obj.cluster);
+  }
+}
+
+interface ClusterEntry {
+  watcher: Watcher<CdsUpdate>;
+  latestUpdate?: CdsUpdate;
+  children: string[];
+}
+
+interface ClusterTree {
+  [name: string]: ClusterEntry;
+}
+
+function isClusterTreeFullyUpdated(tree: ClusterTree, root: string): boolean {
+  const toCheck: string[] = [root];
+  const visited = new Set<string>();
+  while (toCheck.length > 0) {
+    const next = toCheck.shift()!;
+    if (visited.has(next)) {
+      continue;
+    }
+    visited.add(next);
+    if (!tree[next] || !tree[next].latestUpdate) {
+      return false;
+    }
+    toCheck.push(...tree[next].children);
+  }
+  return true;
+}
+
+function generateDiscoverymechanismForCdsUpdate(config: CdsUpdate): DiscoveryMechanism {
+  if (config.type ===
'AGGREGATE') { + throw new Error('Cannot generate DiscoveryMechanism for AGGREGATE cluster'); + } + return { + cluster: config.name, + lrs_load_reporting_server: config.lrsLoadReportingServer, + max_concurrent_requests: config.maxConcurrentRequests, + type: config.type, + eds_service_name: config.edsServiceName, + dns_hostname: config.dnsHostname, + outlier_detection: config.outlierDetectionUpdate + }; +} + +const RECURSION_DEPTH_LIMIT = 15; + +/** + * Prerequisite: isClusterTreeFullyUpdated(tree, root) + * @param tree + * @param root + */ +function getDiscoveryMechanismList(tree: ClusterTree, root: string): DiscoveryMechanism[] { + const visited = new Set(); + function getDiscoveryMechanismListHelper(node: string, depth: number): DiscoveryMechanism[] { + if (depth > RECURSION_DEPTH_LIMIT) { + throw new Error('aggregate cluster graph exceeds max depth'); + } + if (visited.has(node)) { + return []; + } + visited.add(node); + if (tree[node].children.length > 0) { + trace('Visit ' + node + ' children: [' + tree[node].children + ']'); + // Aggregate cluster + const result = []; + for (const child of tree[node].children) { + result.push(...getDiscoveryMechanismListHelper(child, depth + 1)); + } + return result; } else { - throw new Error('Missing "cluster" in cds load balancing config'); + trace('Visit leaf ' + node); + // individual cluster + const config = tree[node].latestUpdate!; + return [generateDiscoverymechanismForCdsUpdate(config)]; } } + return getDiscoveryMechanismListHelper(root, 0); } export class CdsLoadBalancer implements LoadBalancer { private childBalancer: ChildLoadBalancerHandler; - private watcher: Watcher; - - private isWatcherActive = false; private latestCdsUpdate: Cluster__Output | null = null; private latestConfig: CdsLoadBalancingConfig | null = null; private latestAttributes: { [key: string]: unknown } = {}; + private xdsClient: XdsClient | null = null; + + private clusterTree: ClusterTree = {}; + + private updatedChild = false; + + constructor(private readonly channelControlHelper: ChannelControlHelper, options: ChannelOptions) { + this.childBalancer = new XdsClusterResolverChildPolicyHandler(channelControlHelper, options); + } + + private reportError(errorMessage: string) { + trace('CDS cluster reporting error ' + errorMessage); + this.channelControlHelper.updateState(connectivityState.TRANSIENT_FAILURE, new UnavailablePicker({code: status.UNAVAILABLE, details: errorMessage, metadata: new Metadata()})); + } - constructor(private readonly channelControlHelper: ChannelControlHelper) { - this.childBalancer = new ChildLoadBalancerHandler(channelControlHelper); - this.watcher = { - onValidUpdate: (update) => { - this.latestCdsUpdate = update; - /* the lrs_server.self field indicates that the same server should be - * used for load reporting as for other xDS operations. Setting - * lrsLoadReportingServerName to the empty string sets that behavior. - * Otherwise, if the field is omitted, load reporting is disabled. */ - const edsConfig: EdsLoadBalancingConfig = new EdsLoadBalancingConfig(update.name, [], [], update.eds_cluster_config!.service_name === '' ? undefined : update.eds_cluster_config!.service_name, update.lrs_server?.self ? 
'' : undefined); - trace('Child update EDS config: ' + JSON.stringify(edsConfig)); - this.childBalancer.updateAddressList( - [], - edsConfig, - this.latestAttributes - ); + private addCluster(cluster: string) { + if (cluster in this.clusterTree) { + return; + } + trace('Adding watcher for cluster ' + cluster); + const watcher: Watcher = new Watcher({ + onResourceChanged: (update) => { + this.clusterTree[cluster].latestUpdate = update; + if (update.type === 'AGGREGATE') { + const children = update.aggregateChildren + trace('Received update for aggregate cluster ' + cluster + ' with children [' + children + ']'); + this.clusterTree[cluster].children = children; + children.forEach(child => this.addCluster(child)); + } + if (isClusterTreeFullyUpdated(this.clusterTree, this.latestConfig!.getCluster())) { + let discoveryMechanismList: DiscoveryMechanism[]; + try { + discoveryMechanismList = getDiscoveryMechanismList(this.clusterTree, this.latestConfig!.getCluster()); + } catch (e) { + this.reportError((e as Error).message); + return; + } + const rootClusterUpdate = this.clusterTree[this.latestConfig!.getCluster()].latestUpdate!; + const clusterResolverConfig: LoadBalancingConfig = { + xds_cluster_resolver: { + discovery_mechanisms: discoveryMechanismList, + xds_lb_policy: rootClusterUpdate.lbPolicyConfig + } + }; + let parsedClusterResolverConfig: TypedLoadBalancingConfig; + try { + parsedClusterResolverConfig = parseLoadBalancingConfig(clusterResolverConfig); + } catch (e) { + this.reportError(`CDS cluster ${this.latestConfig?.getCluster()} child config parsing failed with error ${(e as Error).message}`); + return; + } + trace('Child update config: ' + JSON.stringify(clusterResolverConfig)); + this.updatedChild = true; + this.childBalancer.updateAddressList( + [], + parsedClusterResolverConfig, + this.latestAttributes + ); + } }, onResourceDoesNotExist: () => { - this.isWatcherActive = false; - this.channelControlHelper.updateState(connectivityState.TRANSIENT_FAILURE, new UnavailablePicker({code: status.UNAVAILABLE, details: 'CDS resource does not exist', metadata: new Metadata()})); + trace('Received onResourceDoesNotExist update for cluster ' + cluster); + if (cluster in this.clusterTree) { + this.clusterTree[cluster].latestUpdate = undefined; + this.clusterTree[cluster].children = []; + } + this.reportError(`CDS resource ${cluster} does not exist`); this.childBalancer.destroy(); }, - onTransientError: (statusObj) => { - if (this.latestCdsUpdate === null) { - channelControlHelper.updateState( - connectivityState.TRANSIENT_FAILURE, - new UnavailablePicker({ - code: status.UNAVAILABLE, - details: `xDS request failed with error ${statusObj.details}`, - metadata: new Metadata(), - }) - ); + onError: (statusObj) => { + if (!this.updatedChild) { + trace('Transitioning to transient failure due to onError update for cluster' + cluster); + this.reportError(`xDS request failed with error ${statusObj.details}`); } - }, + } + }); + this.clusterTree[cluster] = { + watcher: watcher, + children: [] }; + if (this.xdsClient) { + ClusterResourceType.startWatch(this.xdsClient, cluster, watcher); + } + } + + private removeCluster(cluster: string) { + if (!(cluster in this.clusterTree)) { + return; + } + if (this.xdsClient) { + ClusterResourceType.cancelWatch(this.xdsClient, cluster, this.clusterTree[cluster].watcher); + } + delete this.clusterTree[cluster]; + } + + private clearClusterTree() { + for (const cluster of Object.keys(this.clusterTree)) { + this.removeCluster(cluster); + } } updateAddressList( - 
addressList: SubchannelAddress[], - lbConfig: LoadBalancingConfig, + endpointList: Endpoint[], + lbConfig: TypedLoadBalancingConfig, attributes: { [key: string]: unknown } ): void { if (!(lbConfig instanceof CdsLoadBalancingConfig)) { @@ -123,33 +261,25 @@ export class CdsLoadBalancer implements LoadBalancer { } trace('Received update with config ' + JSON.stringify(lbConfig, undefined, 2)); this.latestAttributes = attributes; + this.xdsClient = attributes.xdsClient as XdsClient; /* If the cluster is changing, disable the old watcher before adding the new * one */ if ( - this.isWatcherActive && - this.latestConfig?.getCluster() !== lbConfig.getCluster() + this.latestConfig && this.latestConfig.getCluster() !== lbConfig.getCluster() ) { - trace('Removing old cluster watcher for cluster name ' + this.latestConfig!.getCluster()); - getSingletonXdsClient().removeClusterWatcher( - this.latestConfig!.getCluster(), - this.watcher - ); - /* Setting isWatcherActive to false here lets us have one code path for - * calling addClusterWatcher */ - this.isWatcherActive = false; - /* If we have a new name, the latestCdsUpdate does not correspond to - * the new config, so it is no longer valid */ - this.latestCdsUpdate = null; + trace('Removing old cluster watchers rooted at ' + this.latestConfig.getCluster()); + this.clearClusterTree(); + this.updatedChild = false; + } + + if (!this.latestConfig) { + this.channelControlHelper.updateState(connectivityState.CONNECTING, new QueuePicker(this)); } this.latestConfig = lbConfig; - if (!this.isWatcherActive) { - trace('Adding new cluster watcher for cluster name ' + lbConfig.getCluster()); - getSingletonXdsClient().addClusterWatcher(lbConfig.getCluster(), this.watcher); - this.isWatcherActive = true; - } + this.addCluster(lbConfig.getCluster()); } exitIdle(): void { this.childBalancer.exitIdle(); @@ -158,14 +288,9 @@ export class CdsLoadBalancer implements LoadBalancer { this.childBalancer.resetBackoff(); } destroy(): void { - trace('Destroying load balancer with cluster name ' + this.latestConfig?.getCluster()); + trace('Destroying load balancer rooted at cluster named ' + this.latestConfig?.getCluster()); this.childBalancer.destroy(); - if (this.isWatcherActive) { - getSingletonXdsClient().removeClusterWatcher( - this.latestConfig!.getCluster(), - this.watcher - ); - } + this.clearClusterTree(); } getTypeName(): string { return TYPE_NAME; diff --git a/packages/grpc-js-xds/src/load-balancer-eds.ts b/packages/grpc-js-xds/src/load-balancer-eds.ts deleted file mode 100644 index 657dc3a82..000000000 --- a/packages/grpc-js-xds/src/load-balancer-eds.ts +++ /dev/null @@ -1,489 +0,0 @@ -/* - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -import { connectivityState as ConnectivityState, status as Status, Metadata, logVerbosity as LogVerbosity, experimental } from '@grpc/grpc-js'; -import { getSingletonXdsClient, XdsClient, XdsClusterDropStats } from './xds-client'; -import { ClusterLoadAssignment__Output } from './generated/envoy/api/v2/ClusterLoadAssignment'; -import { Locality__Output } from './generated/envoy/api/v2/core/Locality'; -import { LocalitySubchannelAddress, PriorityChild, PriorityLoadBalancingConfig } from './load-balancer-priority'; -import LoadBalancer = experimental.LoadBalancer; -import ChannelControlHelper = experimental.ChannelControlHelper; -import registerLoadBalancerType = experimental.registerLoadBalancerType; -import LoadBalancingConfig = experimental.LoadBalancingConfig; -import SubchannelAddress = experimental.SubchannelAddress; -import subchannelAddressToString = experimental.subchannelAddressToString; -import ChildLoadBalancerHandler = experimental.ChildLoadBalancerHandler; -import UnavailablePicker = experimental.UnavailablePicker; -import Picker = experimental.Picker; -import PickResultType = experimental.PickResultType; -import { validateLoadBalancingConfig } from '@grpc/grpc-js/build/src/experimental'; -import { WeightedTarget, WeightedTargetLoadBalancingConfig } from './load-balancer-weighted-target'; -import { LrsLoadBalancingConfig } from './load-balancer-lrs'; -import { Watcher } from './xds-stream-state/xds-stream-state'; - -const TRACER_NAME = 'eds_balancer'; - -function trace(text: string): void { - experimental.trace(LogVerbosity.DEBUG, TRACER_NAME, text); -} - -const TYPE_NAME = 'eds'; - -function localityToName(locality: Locality__Output) { - return `{region=${locality.region},zone=${locality.zone},sub_zone=${locality.sub_zone}}`; -} - -export class EdsLoadBalancingConfig implements LoadBalancingConfig { - getLoadBalancerName(): string { - return TYPE_NAME; - } - toJsonObject(): object { - const jsonObj: {[key: string]: any} = { - cluster: this.cluster, - locality_picking_policy: this.localityPickingPolicy.map(policy => policy.toJsonObject()), - endpoint_picking_policy: this.endpointPickingPolicy.map(policy => policy.toJsonObject()) - }; - if (this.edsServiceName !== undefined) { - jsonObj.eds_service_name = this.edsServiceName; - } - if (this.lrsLoadReportingServerName !== undefined) { - jsonObj.lrs_load_reporting_server_name = this.lrsLoadReportingServerName; - } - return { - [TYPE_NAME]: jsonObj - }; - } - - constructor(private cluster: string, private localityPickingPolicy: LoadBalancingConfig[], private endpointPickingPolicy: LoadBalancingConfig[], private edsServiceName?: string, private lrsLoadReportingServerName?: string) { - - } - - getCluster() { - return this.cluster; - } - - getLocalityPickingPolicy() { - return this.localityPickingPolicy; - } - - getEndpointPickingPolicy() { - return this.endpointPickingPolicy; - } - - getEdsServiceName() { - return this.edsServiceName; - } - - getLrsLoadReportingServerName() { - return this.lrsLoadReportingServerName; - } - - static createFromJson(obj: any): EdsLoadBalancingConfig { - if (!('cluster' in obj && typeof obj.cluster === 'string')) { - throw new Error('eds config must have a string field cluster'); - } - if (!('locality_picking_policy' in obj && Array.isArray(obj.locality_picking_policy))) { - throw new Error('eds config must have a locality_picking_policy array'); - } - if (!('endpoint_picking_policy' in obj && Array.isArray(obj.endpoint_picking_policy))) { - throw new Error('eds config must have an 
endpoint_picking_policy array'); - } - if ('eds_service_name' in obj && !(obj.eds_service_name === undefined || typeof obj.eds_service_name === 'string')) { - throw new Error('eds config eds_service_name field must be a string if provided'); - } - if ('lrs_load_reporting_server_name' in obj && (!obj.lrs_load_reporting_server_name === undefined || typeof obj.lrs_load_reporting_server_name === 'string')) { - throw new Error('eds config lrs_load_reporting_server_name must be a string if provided'); - } - return new EdsLoadBalancingConfig(obj.cluster, obj.locality_picking_policy.map(validateLoadBalancingConfig), obj.endpoint_picking_policy.map(validateLoadBalancingConfig), obj.eds_service_name, obj.lrs_load_reporting_server_name); - } -} - -/** - * This class load balances over a cluster by making an EDS request and then - * transforming the result into a configuration for another load balancing - * policy. - */ -export class EdsLoadBalancer implements LoadBalancer { - /** - * The child load balancer that will handle balancing the results of the EDS - * requests. - */ - private childBalancer: ChildLoadBalancerHandler; - private edsServiceName: string | null = null; - private watcher: Watcher; - /** - * Indicates whether the watcher has already been passed to the xdsClient - * and is getting updates. - */ - private isWatcherActive = false; - - private lastestConfig: EdsLoadBalancingConfig | null = null; - private latestAttributes: { [key: string]: unknown } = {}; - private latestEdsUpdate: ClusterLoadAssignment__Output | null = null; - - /** - * The priority of each locality the last time we got an update. - */ - private localityPriorities: Map = new Map(); - /** - * The name we assigned to each priority number the last time we got an - * update. - */ - private priorityNames: string[] = []; - - private nextPriorityChildNumber = 0; - - private clusterDropStats: XdsClusterDropStats | null = null; - - constructor(private readonly channelControlHelper: ChannelControlHelper) { - this.childBalancer = new ChildLoadBalancerHandler({ - createSubchannel: (subchannelAddress, subchannelArgs) => - this.channelControlHelper.createSubchannel( - subchannelAddress, - subchannelArgs - ), - requestReresolution: () => - this.channelControlHelper.requestReresolution(), - updateState: (connectivityState, originalPicker) => { - if (this.latestEdsUpdate === null) { - return; - } - const edsPicker: Picker = { - pick: (pickArgs) => { - const dropCategory = this.checkForDrop(); - /* If we drop the call, it ends with an UNAVAILABLE status. - * Otherwise, delegate picking the subchannel to the child - * balancer. */ - if (dropCategory === null) { - return originalPicker.pick(pickArgs); - } else { - this.clusterDropStats?.addCallDropped(dropCategory); - return { - pickResultType: PickResultType.DROP, - status: { - code: Status.UNAVAILABLE, - details: `Call dropped by load balancing policy. 
Category: ${dropCategory}`, - metadata: new Metadata(), - }, - subchannel: null, - extraFilterFactory: null, - onCallStarted: null, - }; - } - }, - }; - this.channelControlHelper.updateState(connectivityState, edsPicker); - }, - }); - this.watcher = { - onValidUpdate: (update) => { - trace('Received EDS update for ' + this.edsServiceName + ': ' + JSON.stringify(update, undefined, 2)); - this.latestEdsUpdate = update; - this.updateChild(); - }, - onResourceDoesNotExist: () => { - this.isWatcherActive = false; - this.channelControlHelper.updateState(ConnectivityState.TRANSIENT_FAILURE, new UnavailablePicker({code: Status.UNAVAILABLE, details: 'EDS resource does not exist', metadata: new Metadata()})); - this.childBalancer.destroy(); - }, - onTransientError: (status) => { - if (this.latestEdsUpdate === null) { - channelControlHelper.updateState( - ConnectivityState.TRANSIENT_FAILURE, - new UnavailablePicker({ - code: Status.UNAVAILABLE, - details: `xDS request failed with error ${status.details}`, - metadata: new Metadata(), - }) - ); - } - }, - }; - } - - /** - * Check whether a single call should be dropped according to the current - * policy, based on randomly chosen numbers. Returns the drop category if - * the call should be dropped, and null otherwise. - */ - private checkForDrop(): string | null { - if (!this.latestEdsUpdate?.policy) { - return null; - } - /* The drop_overloads policy is a list of pairs of category names and - * probabilities. For each one, if the random number is within that - * probability range, we drop the call citing that category. Otherwise, the - * call proceeds as usual. */ - for (const dropOverload of this.latestEdsUpdate.policy.drop_overloads) { - if (!dropOverload.drop_percentage) { - continue; - } - let randNum: number; - switch (dropOverload.drop_percentage.denominator) { - case 'HUNDRED': - randNum = Math.random() * 100; - break; - case 'TEN_THOUSAND': - randNum = Math.random() * 10_000; - break; - case 'MILLION': - randNum = Math.random() * 1_000_000; - break; - default: - continue; - } - if (randNum < dropOverload.drop_percentage.numerator) { - return dropOverload.category; - } - } - return null; - } - - /** - * Should be called when this balancer gets a new config and when the - * XdsClient returns a new ClusterLoadAssignment. - */ - private updateChild() { - if (!(this.lastestConfig && this.latestEdsUpdate)) { - return; - } - /** - * Maps each priority number to the list of localities with that priority, - * and the list of addresses associated with each locality. - */ - const priorityList: { - locality: Locality__Output; - weight: number; - addresses: SubchannelAddress[]; - }[][] = []; - /** - * New replacement for this.localityPriorities, mapping locality names to - * priority values. The replacement occurrs at the end of this method. - */ - const newLocalityPriorities: Map = new Map< - string, - number - >(); - /* We are given a list of localities, each of which has a priority. 
This - * loop consolidates localities into buckets by priority, while also - * simplifying the data structure to make the later steps simpler */ - for (const endpoint of this.latestEdsUpdate.endpoints) { - if (!endpoint.load_balancing_weight) { - continue; - } - const addresses: SubchannelAddress[] = endpoint.lb_endpoints.filter(lbEndpoint => lbEndpoint.health_status === 'UNKNOWN' || lbEndpoint.health_status === 'HEALTHY').map( - (lbEndpoint) => { - /* The validator in the XdsClient class ensures that each endpoint has - * a socket_address with an IP address and a port_value. */ - const socketAddress = lbEndpoint.endpoint!.address!.socket_address!; - return { - host: socketAddress.address!, - port: socketAddress.port_value!, - }; - } - ); - if (addresses.length > 0) { - let localityArray = priorityList[endpoint.priority]; - if (localityArray === undefined) { - localityArray = []; - priorityList[endpoint.priority] = localityArray; - } - localityArray.push({ - locality: endpoint.locality!, - addresses: addresses, - weight: endpoint.load_balancing_weight.value, - }); - newLocalityPriorities.set( - localityToName(endpoint.locality!), - endpoint.priority - ); - } - } - - const newPriorityNames: string[] = []; - const addressList: LocalitySubchannelAddress[] = []; - const priorityChildren: Map = new Map< - string, - PriorityChild - >(); - /* The algorithm here is as follows: for each priority we are given, from - * high to low: - * - If the previous mapping had any of the same localities at the same or - * a lower priority, use the matching name from the highest such - * priority, unless the new mapping has already used that name. - * - Otherwise, construct a new name using this.nextPriorityChildNumber. - */ - for (const [priority, localityArray] of priorityList.entries()) { - // Skip priorities that have no localities with healthy endpoints - if (localityArray === undefined) { - continue; - } - /** - * Highest (smallest number) priority value that any of the localities in - * this locality array had a in the previous mapping. - */ - let highestOldPriority = Infinity; - for (const localityObj of localityArray) { - const oldPriority = this.localityPriorities.get( - localityToName(localityObj.locality) - ); - if ( - oldPriority !== undefined && - oldPriority >= priority && - oldPriority < highestOldPriority - ) { - highestOldPriority = oldPriority; - } - } - let newPriorityName: string; - if (highestOldPriority === Infinity) { - /* No existing priority at or below the same number as the priority we - * are looking at had any of the localities in this priority. So, we - * use a new name. */ - newPriorityName = `child${this.nextPriorityChildNumber++}`; - } else { - const newName = this.priorityNames[highestOldPriority]; - if (newPriorityNames.indexOf(newName) < 0) { - newPriorityName = newName; - } else { - newPriorityName = `child${this.nextPriorityChildNumber++}`; - } - } - newPriorityNames[priority] = newPriorityName; - - const childTargets: Map = new Map< - string, - WeightedTarget - >(); - for (const localityObj of localityArray) { - /* Use the endpoint picking policy from the config, default to - * round_robin. 
*/ - const endpointPickingPolicy: LoadBalancingConfig[] = [ - ...this.lastestConfig.getEndpointPickingPolicy(), - validateLoadBalancingConfig({ round_robin: {} }), - ]; - let childPolicy: LoadBalancingConfig[]; - if (this.lastestConfig.getLrsLoadReportingServerName() !== undefined) { - childPolicy = [new LrsLoadBalancingConfig(this.lastestConfig.getCluster(), this.lastestConfig.getEdsServiceName() ?? '', this.lastestConfig.getLrsLoadReportingServerName()!, localityObj.locality, endpointPickingPolicy)]; - } else { - childPolicy = endpointPickingPolicy; - } - childTargets.set(localityToName(localityObj.locality), { - weight: localityObj.weight, - child_policy: childPolicy, - }); - for (const address of localityObj.addresses) { - addressList.push({ - localityPath: [ - newPriorityName, - localityToName(localityObj.locality), - ], - ...address, - }); - } - } - - priorityChildren.set(newPriorityName, { - config: [ - new WeightedTargetLoadBalancingConfig(childTargets), - ], - }); - } - /* Contract the priority names array if it is sparse. This config only - * cares about the order of priorities, not their specific numbers */ - const childConfig: PriorityLoadBalancingConfig = new PriorityLoadBalancingConfig(priorityChildren, newPriorityNames.filter((value) => value !== undefined)); - trace('Child update addresses: ' + addressList.map(address => '(' + subchannelAddressToString(address) + ' path=' + address.localityPath + ')')); - trace('Child update priority config: ' + JSON.stringify(childConfig.toJsonObject(), undefined, 2)); - this.childBalancer.updateAddressList( - addressList, - childConfig, - this.latestAttributes - ); - - this.localityPriorities = newLocalityPriorities; - this.priorityNames = newPriorityNames; - } - - updateAddressList( - addressList: SubchannelAddress[], - lbConfig: LoadBalancingConfig, - attributes: { [key: string]: unknown } - ): void { - if (!(lbConfig instanceof EdsLoadBalancingConfig)) { - trace('Discarding address list update with unrecognized config ' + JSON.stringify(lbConfig.toJsonObject(), undefined, 2)); - return; - } - trace('Received update with config: ' + JSON.stringify(lbConfig, undefined, 2)); - this.lastestConfig = lbConfig; - this.latestAttributes = attributes; - const newEdsServiceName = lbConfig.getEdsServiceName() ?? lbConfig.getCluster(); - - /* If the name is changing, disable the old watcher before adding the new - * one */ - if (this.isWatcherActive && this.edsServiceName !== newEdsServiceName) { - trace('Removing old endpoint watcher for edsServiceName ' + this.edsServiceName) - getSingletonXdsClient().removeEndpointWatcher(this.edsServiceName!, this.watcher); - /* Setting isWatcherActive to false here lets us have one code path for - * calling addEndpointWatcher */ - this.isWatcherActive = false; - /* If we have a new name, the latestEdsUpdate does not correspond to - * the new config, so it is no longer valid */ - this.latestEdsUpdate = null; - } - - this.edsServiceName = newEdsServiceName; - - if (!this.isWatcherActive) { - trace('Adding new endpoint watcher for edsServiceName ' + this.edsServiceName); - getSingletonXdsClient().addEndpointWatcher(this.edsServiceName, this.watcher); - this.isWatcherActive = true; - } - - if (lbConfig.getLrsLoadReportingServerName()) { - this.clusterDropStats = getSingletonXdsClient().addClusterDropStats( - lbConfig.getLrsLoadReportingServerName()!, - lbConfig.getCluster(), - lbConfig.getEdsServiceName() ?? 
'' - ); - } - - /* If updateAddressList is called after receiving an update and the update - * is still valid, we want to update the child config with the information - * in the new EdsLoadBalancingConfig. */ - this.updateChild(); - } - exitIdle(): void { - this.childBalancer.exitIdle(); - } - resetBackoff(): void { - this.childBalancer.resetBackoff(); - } - destroy(): void { - trace('Destroying load balancer with edsServiceName ' + this.edsServiceName); - if (this.edsServiceName) { - getSingletonXdsClient().removeEndpointWatcher(this.edsServiceName, this.watcher); - } - this.childBalancer.destroy(); - } - getTypeName(): string { - return TYPE_NAME; - } -} - -export function setup() { - registerLoadBalancerType(TYPE_NAME, EdsLoadBalancer, EdsLoadBalancingConfig); -} diff --git a/packages/grpc-js-xds/src/load-balancer-lrs.ts b/packages/grpc-js-xds/src/load-balancer-lrs.ts deleted file mode 100644 index 0792b11c2..000000000 --- a/packages/grpc-js-xds/src/load-balancer-lrs.ts +++ /dev/null @@ -1,238 +0,0 @@ -/* - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -import { connectivityState as ConnectivityState, StatusObject, status as Status, experimental } from '@grpc/grpc-js'; -import { Locality__Output } from './generated/envoy/api/v2/core/Locality'; -import { XdsClusterLocalityStats, XdsClient, getSingletonXdsClient } from './xds-client'; -import LoadBalancer = experimental.LoadBalancer; -import ChannelControlHelper = experimental.ChannelControlHelper; -import registerLoadBalancerType = experimental.registerLoadBalancerType; -import getFirstUsableConfig = experimental.getFirstUsableConfig; -import SubchannelAddress = experimental.SubchannelAddress; -import LoadBalancingConfig = experimental.LoadBalancingConfig; -import ChildLoadBalancerHandler = experimental.ChildLoadBalancerHandler; -import Picker = experimental.Picker; -import PickArgs = experimental.PickArgs; -import PickResultType = experimental.PickResultType; -import PickResult = experimental.PickResult; -import Filter = experimental.Filter; -import BaseFilter = experimental.BaseFilter; -import FilterFactory = experimental.FilterFactory; -import FilterStackFactory = experimental.FilterStackFactory; -import Call = experimental.CallStream; -import validateLoadBalancingConfig = experimental.validateLoadBalancingConfig - -const TYPE_NAME = 'lrs'; - -export class LrsLoadBalancingConfig implements LoadBalancingConfig { - getLoadBalancerName(): string { - return TYPE_NAME; - } - toJsonObject(): object { - return { - [TYPE_NAME]: { - cluster_name: this.clusterName, - eds_service_name: this.edsServiceName, - lrs_load_reporting_server_name: this.lrsLoadReportingServerName, - locality: this.locality, - child_policy: this.childPolicy.map(policy => policy.toJsonObject()) - } - } - } - - constructor(private clusterName: string, private edsServiceName: string, private lrsLoadReportingServerName: string, private locality: Locality__Output, private childPolicy: LoadBalancingConfig[]) {} - - 
getClusterName() { - return this.clusterName; - } - - getEdsServiceName() { - return this.edsServiceName; - } - - getLrsLoadReportingServerName() { - return this.lrsLoadReportingServerName; - } - - getLocality() { - return this.locality; - } - - getChildPolicy() { - return this.childPolicy; - } - - static createFromJson(obj: any): LrsLoadBalancingConfig { - if (!('cluster_name' in obj && typeof obj.cluster_name === 'string')) { - throw new Error('lrs config must have a string field cluster_name'); - } - if (!('eds_service_name' in obj && typeof obj.eds_service_name === 'string')) { - throw new Error('lrs config must have a string field eds_service_name'); - } - if (!('lrs_load_reporting_server_name' in obj && typeof obj.lrs_load_reporting_server_name === 'string')) { - throw new Error('lrs config must have a string field lrs_load_reporting_server_name'); - } - if (!('locality' in obj && obj.locality !== null && typeof obj.locality === 'object')) { - throw new Error('lrs config must have an object field locality'); - } - if ('region' in obj.locality && typeof obj.locality.region !== 'string') { - throw new Error('lrs config locality.region field must be a string if provided'); - } - if ('zone' in obj.locality && typeof obj.locality.zone !== 'string') { - throw new Error('lrs config locality.zone field must be a string if provided'); - } - if ('sub_zone' in obj.locality && typeof obj.locality.sub_zone !== 'string') { - throw new Error('lrs config locality.sub_zone field must be a string if provided'); - } - if (!('child_policy' in obj && Array.isArray(obj.child_policy))) { - throw new Error('lrs config must have a child_policy array'); - } - return new LrsLoadBalancingConfig(obj.cluster_name, obj.eds_service_name, obj.lrs_load_reporting_server_name, { - region: obj.locality.region ?? '', - zone: obj.locality.zone ?? '', - sub_zone: obj.locality.sub_zone ?? '' - }, obj.child_policy.map(validateLoadBalancingConfig)); - } -} - -/** - * Filter class that reports when the call ends. - */ -class CallEndTrackingFilter extends BaseFilter implements Filter { - constructor(private localityStatsReporter: XdsClusterLocalityStats) { - super(); - } - - receiveTrailers(status: StatusObject) { - this.localityStatsReporter.addCallFinished(status.code !== Status.OK); - return status; - } -} - -class CallEndTrackingFilterFactory - implements FilterFactory { - constructor(private localityStatsReporter: XdsClusterLocalityStats) {} - - createFilter(callStream: Call): CallEndTrackingFilter { - return new CallEndTrackingFilter(this.localityStatsReporter); - } -} - -/** - * Picker that delegates picking to another picker, and reports when calls - * created using those picks start and end. - */ -class LoadReportingPicker implements Picker { - constructor( - private wrappedPicker: Picker, - private localityStatsReporter: XdsClusterLocalityStats - ) {} - - pick(pickArgs: PickArgs): PickResult { - const wrappedPick = this.wrappedPicker.pick(pickArgs); - if (wrappedPick.pickResultType === PickResultType.COMPLETE) { - const trackingFilterFactory = new CallEndTrackingFilterFactory( - this.localityStatsReporter - ); - /* In the unlikely event that the wrappedPick already has an - * extraFilterFactory, preserve it in a FilterStackFactory. */ - const extraFilterFactory = wrappedPick.extraFilterFactory - ? 
new FilterStackFactory([ - wrappedPick.extraFilterFactory, - trackingFilterFactory, - ]) - : trackingFilterFactory; - return { - pickResultType: PickResultType.COMPLETE, - subchannel: wrappedPick.subchannel, - status: null, - onCallStarted: () => { - wrappedPick.onCallStarted?.(); - this.localityStatsReporter.addCallStarted(); - }, - extraFilterFactory: extraFilterFactory, - }; - } else { - return wrappedPick; - } - } -} - -/** - * "Load balancer" that delegates the actual load balancing logic to another - * LoadBalancer class and adds hooks to track when calls started using that - * LoadBalancer start and end, and uses the XdsClient to report that - * information back to the xDS server. - */ -export class LrsLoadBalancer implements LoadBalancer { - private childBalancer: ChildLoadBalancerHandler; - private localityStatsReporter: XdsClusterLocalityStats | null = null; - - constructor(private channelControlHelper: ChannelControlHelper) { - this.childBalancer = new ChildLoadBalancerHandler({ - createSubchannel: (subchannelAddress, subchannelArgs) => - channelControlHelper.createSubchannel( - subchannelAddress, - subchannelArgs - ), - requestReresolution: () => channelControlHelper.requestReresolution(), - updateState: (connectivityState: ConnectivityState, picker: Picker) => { - if (this.localityStatsReporter !== null) { - picker = new LoadReportingPicker(picker, this.localityStatsReporter); - } - channelControlHelper.updateState(connectivityState, picker); - }, - }); - } - - updateAddressList( - addressList: SubchannelAddress[], - lbConfig: LoadBalancingConfig, - attributes: { [key: string]: unknown } - ): void { - if (!(lbConfig instanceof LrsLoadBalancingConfig)) { - return; - } - this.localityStatsReporter = getSingletonXdsClient().addClusterLocalityStats( - lbConfig.getLrsLoadReportingServerName(), - lbConfig.getClusterName(), - lbConfig.getEdsServiceName(), - lbConfig.getLocality() - ); - const childPolicy: LoadBalancingConfig = getFirstUsableConfig( - lbConfig.getChildPolicy(), - true - ); - this.childBalancer.updateAddressList(addressList, childPolicy, attributes); - } - exitIdle(): void { - this.childBalancer.exitIdle(); - } - resetBackoff(): void { - this.childBalancer.resetBackoff(); - } - destroy(): void { - this.childBalancer.destroy(); - } - getTypeName(): string { - return TYPE_NAME; - } -} - -export function setup() { - registerLoadBalancerType(TYPE_NAME, LrsLoadBalancer, LrsLoadBalancingConfig); -} diff --git a/packages/grpc-js-xds/src/load-balancer-priority.ts b/packages/grpc-js-xds/src/load-balancer-priority.ts index 872ed7d1b..54e01fa84 100644 --- a/packages/grpc-js-xds/src/load-balancer-priority.ts +++ b/packages/grpc-js-xds/src/load-balancer-priority.ts @@ -15,19 +15,19 @@ * */ -import { connectivityState as ConnectivityState, status as Status, Metadata, logVerbosity as LogVerbosity, experimental, ChannelOptions } from '@grpc/grpc-js'; -import validateLoadBalancingConfig = experimental.validateLoadBalancingConfig; +import { connectivityState as ConnectivityState, status as Status, Metadata, logVerbosity as LogVerbosity, experimental, LoadBalancingConfig, ChannelOptions } from '@grpc/grpc-js'; import LoadBalancer = experimental.LoadBalancer; import ChannelControlHelper = experimental.ChannelControlHelper; -import getFirstUsableConfig = experimental.getFirstUsableConfig; import registerLoadBalancerType = experimental.registerLoadBalancerType; -import SubchannelAddress = experimental.SubchannelAddress; -import subchannelAddressToString = 
experimental.subchannelAddressToString; -import LoadBalancingConfig = experimental.LoadBalancingConfig; +import Endpoint = experimental.Endpoint; +import endpointToString = experimental.endpointToString; +import TypedLoadBalancingConfig = experimental.TypedLoadBalancingConfig; import Picker = experimental.Picker; import QueuePicker = experimental.QueuePicker; import UnavailablePicker = experimental.UnavailablePicker; import ChildLoadBalancerHandler = experimental.ChildLoadBalancerHandler; +import selectLbConfigFromList = experimental.selectLbConfigFromList; +import { Locality__Output } from './generated/envoy/config/core/v3/Locality'; const TRACER_NAME = 'priority'; @@ -40,21 +40,60 @@ const TYPE_NAME = 'priority'; const DEFAULT_FAILOVER_TIME_MS = 10_000; const DEFAULT_RETENTION_INTERVAL_MS = 15 * 60 * 1000; -export type LocalitySubchannelAddress = SubchannelAddress & { +export interface LocalityEndpoint extends Endpoint { + /** + * A sequence of strings that determines how to divide endpoints up in priority and + * weighted_target. + */ localityPath: string[]; + /** + * The locality this endpoint is in. Used in wrr_locality and xds_cluster_impl. + */ + locality: Locality__Output; + /** + * The load balancing weight for the entire locality that contains this + * endpoint. Used in xds_wrr_locality. + */ + localityWeight: number; + /** + * The overall load balancing weight for this endpoint, calculated as the + * product of the load balancing weight for this endpoint within its locality + * and the load balancing weight of the locality. Used in ring_hash. + */ + endpointWeight: number; }; -export function isLocalitySubchannelAddress( - address: SubchannelAddress -): address is LocalitySubchannelAddress { - return Array.isArray((address as LocalitySubchannelAddress).localityPath); +export function isLocalityEndpoint( + address: Endpoint +): address is LocalityEndpoint { + return Array.isArray((address as LocalityEndpoint).localityPath); } -export interface PriorityChild { +/** + * Type of the config for an individual child in the JSON representation of + * a priority LB policy config. + */ +export interface PriorityChildRaw { config: LoadBalancingConfig[]; + ignore_reresolution_requests: boolean; } -export class PriorityLoadBalancingConfig implements LoadBalancingConfig { +/** + * The JSON representation of the config for the priority LB policy. 
The + * LoadBalancingConfig for a priority policy should have the form + * { priority: PriorityRawConfig } + */ +export interface PriorityRawConfig { + children: {[name: string]: PriorityChildRaw}; + priorities: string[]; +} + +interface PriorityChild { + config: TypedLoadBalancingConfig; + ignore_reresolution_requests: boolean; +} + +class PriorityLoadBalancingConfig implements TypedLoadBalancingConfig { getLoadBalancerName(): string { return TYPE_NAME; } @@ -62,7 +101,8 @@ export class PriorityLoadBalancingConfig implements LoadBalancingConfig { const childrenField: {[key: string]: object} = {} for (const [childName, childValue] of this.children.entries()) { childrenField[childName] = { - config: childValue.config.map(value => value.toJsonObject()) + config: [childValue.config.toJsonObject()], + ignore_reresolution_requests: childValue.ignore_reresolution_requests }; } return { @@ -92,13 +132,21 @@ export class PriorityLoadBalancingConfig implements LoadBalancingConfig { throw new Error('Priority config must have a priorities list'); } const childrenMap: Map = new Map(); - for (const childName of obj.children) { + for (const childName of Object.keys(obj.children)) { const childObj = obj.children[childName] if (!('config' in childObj && Array.isArray(childObj.config))) { throw new Error(`Priority child ${childName} must have a config list`); } + if (!('ignore_reresolution_requests' in childObj && typeof childObj.ignore_reresolution_requests === 'boolean')) { + throw new Error(`Priority child ${childName} must have a boolean field ignore_reresolution_requests`); + } + const childConfig = selectLbConfigFromList(childObj.config); + if (!childConfig) { + throw new Error(`Priority child ${childName} config parsing failed`); + } childrenMap.set(childName, { - config: childObj.config.map(validateLoadBalancingConfig) + config: childConfig, + ignore_reresolution_requests: childObj.ignore_reresolution_requests }); } return new PriorityLoadBalancingConfig(childrenMap, obj.priorities); @@ -107,15 +155,14 @@ export class PriorityLoadBalancingConfig implements LoadBalancingConfig { interface PriorityChildBalancer { updateAddressList( - addressList: SubchannelAddress[], - lbConfig: LoadBalancingConfig, + endpointList: Endpoint[], + lbConfig: TypedLoadBalancingConfig, attributes: { [key: string]: unknown } ): void; exitIdle(): void; resetBackoff(): void; deactivate(): void; maybeReactivate(): void; - cancelFailoverTimer(): void; isFailoverTimerPending(): boolean; getConnectivityState(): ConnectivityState; getPicker(): Picker; @@ -124,8 +171,9 @@ interface PriorityChildBalancer { } interface UpdateArgs { - subchannelAddress: SubchannelAddress[]; - lbConfig: LoadBalancingConfig; + subchannelAddress: Endpoint[]; + lbConfig: TypedLoadBalancingConfig; + ignoreReresolutionRequests: boolean; } export class PriorityLoadBalancer implements LoadBalancer { @@ -138,31 +186,37 @@ export class PriorityLoadBalancer implements LoadBalancer { private childBalancer: ChildLoadBalancerHandler; private failoverTimer: NodeJS.Timer | null = null; private deactivationTimer: NodeJS.Timer | null = null; - constructor(private parent: PriorityLoadBalancer, private name: string) { - this.childBalancer = new ChildLoadBalancerHandler({ - createSubchannel: ( - subchannelAddress: SubchannelAddress, - subchannelArgs: ChannelOptions - ) => { - return this.parent.channelControlHelper.createSubchannel( - subchannelAddress, - subchannelArgs - ); - }, + private seenReadyOrIdleSinceTransientFailure = false; + constructor(private parent: 
PriorityLoadBalancer, private name: string, ignoreReresolutionRequests: boolean) { + this.childBalancer = new ChildLoadBalancerHandler(experimental.createChildChannelControlHelper(this.parent.channelControlHelper, { updateState: (connectivityState: ConnectivityState, picker: Picker) => { this.updateState(connectivityState, picker); }, requestReresolution: () => { - this.parent.channelControlHelper.requestReresolution(); - }, - }); + if (!ignoreReresolutionRequests) { + this.parent.channelControlHelper.requestReresolution(); + } + } + }), parent.options); this.picker = new QueuePicker(this.childBalancer); + this.startFailoverTimer(); } private updateState(connectivityState: ConnectivityState, picker: Picker) { trace('Child ' + this.name + ' ' + ConnectivityState[this.connectivityState] + ' -> ' + ConnectivityState[connectivityState]); this.connectivityState = connectivityState; this.picker = picker; + if (connectivityState === ConnectivityState.CONNECTING) { + if (this.seenReadyOrIdleSinceTransientFailure && this.failoverTimer === null) { + this.startFailoverTimer(); + } + } else if (connectivityState === ConnectivityState.READY || connectivityState === ConnectivityState.IDLE) { + this.seenReadyOrIdleSinceTransientFailure = true; + this.cancelFailoverTimer(); + } else if (connectivityState === ConnectivityState.TRANSIENT_FAILURE) { + this.seenReadyOrIdleSinceTransientFailure = false; + this.cancelFailoverTimer(); + } this.parent.onChildStateChange(this); } @@ -181,18 +235,14 @@ export class PriorityLoadBalancer implements LoadBalancer { } updateAddressList( - addressList: SubchannelAddress[], - lbConfig: LoadBalancingConfig, + endpointList: Endpoint[], + lbConfig: TypedLoadBalancingConfig, attributes: { [key: string]: unknown } ): void { - this.childBalancer.updateAddressList(addressList, lbConfig, attributes); - this.startFailoverTimer(); + this.childBalancer.updateAddressList(endpointList, lbConfig, attributes); } exitIdle() { - if (this.connectivityState === ConnectivityState.IDLE) { - this.startFailoverTimer(); - } this.childBalancer.exitIdle(); } @@ -216,7 +266,7 @@ export class PriorityLoadBalancer implements LoadBalancer { } } - cancelFailoverTimer() { + private cancelFailoverTimer() { if (this.failoverTimer !== null) { clearTimeout(this.failoverTimer); this.failoverTimer = null; @@ -270,16 +320,10 @@ export class PriorityLoadBalancer implements LoadBalancer { * Current chosen priority that requests are sent to */ private currentPriority: number | null = null; - /** - * After an update, this preserves the currently selected child from before - * the update. We continue to use that child until it disconnects, or - * another higher-priority child connects, or it is deleted because it is not - * in the new priority list at all and its retention interval has expired, or - * we try and fail to connect to every child in the new priority list. - */ - private currentChildFromBeforeUpdate: PriorityChildBalancer | null = null; - constructor(private channelControlHelper: ChannelControlHelper) {} + private updatesPaused = false; + + constructor(private channelControlHelper: ChannelControlHelper, private options: ChannelOptions) {} private updateState(state: ConnectivityState, picker: Picker) { trace( @@ -290,7 +334,7 @@ export class PriorityLoadBalancer implements LoadBalancer { * so that when the picker calls exitIdle, that in turn calls exitIdle on * the PriorityChildImpl, which will start the failover timer. 
*/ if (state === ConnectivityState.IDLE) { - picker = new QueuePicker(this); + picker = new QueuePicker(this, picker); } this.channelControlHelper.updateState(state, picker); } @@ -298,57 +342,14 @@ export class PriorityLoadBalancer implements LoadBalancer { private onChildStateChange(child: PriorityChildBalancer) { const childState = child.getConnectivityState(); trace('Child ' + child.getName() + ' transitioning to ' + ConnectivityState[childState]); - if (child === this.currentChildFromBeforeUpdate) { - if ( - childState === ConnectivityState.READY || - childState === ConnectivityState.IDLE - ) { - this.updateState(childState, child.getPicker()); - } else { - this.currentChildFromBeforeUpdate = null; - this.tryNextPriority(true); - } - return; - } - const childPriority = this.priorities.indexOf(child.getName()); - if (childPriority < 0) { - // child is not in the priority list, ignore updates + if (this.updatesPaused) { return; } - if (this.currentPriority !== null && childPriority > this.currentPriority) { - // child is lower priority than the currently selected child, ignore updates - return; - } - if (childState === ConnectivityState.TRANSIENT_FAILURE) { - /* Report connecting if and only if the currently selected child is the - * one entering TRANSIENT_FAILURE */ - this.tryNextPriority(childPriority === this.currentPriority); - return; - } - if (this.currentPriority === null || childPriority < this.currentPriority) { - /* In this case, either there is no currently selected child or this - * child is higher priority than the currently selected child, so we want - * to switch to it if it is READY or IDLE. */ - if ( - childState === ConnectivityState.READY || - childState === ConnectivityState.IDLE - ) { - this.selectPriority(childPriority); - } - return; - } - /* The currently selected child has updated state to something other than - * TRANSIENT_FAILURE, so we pass that update along */ - this.updateState(childState, child.getPicker()); + this.choosePriority(); } private deleteChild(child: PriorityChildBalancer) { - if (child === this.currentChildFromBeforeUpdate) { - this.currentChildFromBeforeUpdate = null; - /* If we get to this point, the currentChildFromBeforeUpdate was still in - * use, so we are still trying to connect to the specified priorities */ - this.tryNextPriority(true); - } + this.children.delete(child.getName()); } /** @@ -357,80 +358,79 @@ export class PriorityLoadBalancer implements LoadBalancer { * child connects. * @param priority */ - private selectPriority(priority: number) { + private selectPriority(priority: number, deactivateLowerPriorities: boolean) { this.currentPriority = priority; const chosenChild = this.children.get(this.priorities[priority])!; - chosenChild.cancelFailoverTimer(); this.updateState( chosenChild.getConnectivityState(), chosenChild.getPicker() ); - this.currentChildFromBeforeUpdate = null; - // Deactivate each child of lower priority than the chosen child - for (let i = priority + 1; i < this.priorities.length; i++) { - this.children.get(this.priorities[i])?.deactivate(); + if (deactivateLowerPriorities) { + for (let i = priority + 1; i < this.priorities.length; i++) { + this.children.get(this.priorities[i])?.deactivate(); + } } } - /** - * Check each child in priority order until we find one to use - * @param reportConnecting Whether we should report a CONNECTING state if we - * stop before picking a specific child. This should be true when we have - * not already selected a child. 
- */ - private tryNextPriority(reportConnecting: boolean) { - for (const [index, childName] of this.priorities.entries()) { + private choosePriority() { + if (this.priorities.length === 0) { + this.updateState(ConnectivityState.TRANSIENT_FAILURE, new UnavailablePicker({code: Status.UNAVAILABLE, details: 'priority policy has empty priority list', metadata: new Metadata()})); + return; + } + + for (const [priority, childName] of this.priorities.entries()) { + trace('Trying priority ' + priority + ' child ' + childName); let child = this.children.get(childName); /* If the child doesn't already exist, create it and update it. */ if (child === undefined) { - if (reportConnecting) { - this.updateState(ConnectivityState.CONNECTING, new QueuePicker(this)); - } - child = new this.PriorityChildImpl(this, childName); - this.children.set(childName, child); const childUpdate = this.latestUpdates.get(childName); - if (childUpdate !== undefined) { - child.updateAddressList( - childUpdate.subchannelAddress, - childUpdate.lbConfig, - this.latestAttributes - ); + if (childUpdate === undefined) { + continue; } + child = new this.PriorityChildImpl(this, childName, childUpdate.ignoreReresolutionRequests); + this.children.set(childName, child); + child.updateAddressList( + childUpdate.subchannelAddress, + childUpdate.lbConfig, + this.latestAttributes + ); + } else { + /* We're going to try to use this child, so reactivate it if it has been + * deactivated */ + child.maybeReactivate(); } - /* We're going to try to use this child, so reactivate it if it has been - * deactivated */ - child.maybeReactivate(); if ( child.getConnectivityState() === ConnectivityState.READY || child.getConnectivityState() === ConnectivityState.IDLE ) { - this.selectPriority(index); + this.selectPriority(priority, true); return; } if (child.isFailoverTimerPending()) { + this.selectPriority(priority, false); /* This child is still trying to connect. Wait until its failover timer - * has ended to continue to the next one */ - if (reportConnecting) { - this.updateState(ConnectivityState.CONNECTING, new QueuePicker(this)); - } + * has ended to continue to the next one */ return; } } - this.currentPriority = null; - this.currentChildFromBeforeUpdate = null; - this.updateState( - ConnectivityState.TRANSIENT_FAILURE, - new UnavailablePicker({ - code: Status.UNAVAILABLE, - details: 'No ready priority', - metadata: new Metadata(), - }) - ); + + /* If we didn't find any priority to try, pick the first one in the state + * CONNECTING */ + for (const [priority, childName] of this.priorities.entries()) { + let child = this.children.get(childName)!; + if (child.getConnectivityState() === ConnectivityState.CONNECTING) { + this.selectPriority(priority, false); + return; + } + } + + // Did not find any child in CONNECTING, delegate to last child + this.selectPriority(this.priorities.length - 1, false); } updateAddressList( - addressList: SubchannelAddress[], - lbConfig: LoadBalancingConfig, + endpointList: Endpoint[], + lbConfig: TypedLoadBalancingConfig, attributes: { [key: string]: unknown } ): void { if (!(lbConfig instanceof PriorityLoadBalancingConfig)) { @@ -442,23 +442,23 @@ export class PriorityLoadBalancer implements LoadBalancer { * which child it belongs to. So we bucket those addresses by that first * element, and pass along the rest of the localityPath for that child * to use. 
*/ - const childAddressMap: Map = new Map< + const childAddressMap: Map = new Map< string, - LocalitySubchannelAddress[] + LocalityEndpoint[] >(); - for (const address of addressList) { - if (!isLocalitySubchannelAddress(address)) { + for (const endpoint of endpointList) { + if (!isLocalityEndpoint(endpoint)) { // Reject address that cannot be prioritized return; } - if (address.localityPath.length < 1) { + if (endpoint.localityPath.length < 1) { // Reject address that cannot be prioritized return; } - const childName = address.localityPath[0]; - const childAddress: LocalitySubchannelAddress = { - ...address, - localityPath: address.localityPath.slice(1), + const childName = endpoint.localityPath[0]; + const childAddress: LocalityEndpoint = { + ...endpoint, + localityPath: endpoint.localityPath.slice(1), }; let childAddressList = childAddressMap.get(childName); if (childAddressList === undefined) { @@ -467,34 +467,27 @@ export class PriorityLoadBalancer implements LoadBalancer { } childAddressList.push(childAddress); } - if (this.currentPriority !== null) { - this.currentChildFromBeforeUpdate = this.children.get( - this.priorities[this.currentPriority] - )!; - this.currentPriority = null; - } this.latestAttributes = attributes; this.latestUpdates.clear(); this.priorities = lbConfig.getPriorities(); + this.updatesPaused = true; /* Pair up the new child configs with the corresponding address lists, and * update all existing children with their new configs */ for (const [childName, childConfig] of lbConfig.getChildren()) { - const chosenChildConfig = getFirstUsableConfig(childConfig.config); - if (chosenChildConfig !== null) { - const childAddresses = childAddressMap.get(childName) ?? []; - trace('Assigning child ' + childName + ' address list ' + childAddresses.map(address => '(' + subchannelAddressToString(address) + ' path=' + address.localityPath + ')')) - this.latestUpdates.set(childName, { - subchannelAddress: childAddresses, - lbConfig: chosenChildConfig, - }); - const existingChild = this.children.get(childName); - if (existingChild !== undefined) { - existingChild.updateAddressList( - childAddresses, - chosenChildConfig, - attributes - ); - } + const childAddresses = childAddressMap.get(childName) ?? 
[]; + trace('Assigning child ' + childName + ' endpoint list ' + childAddresses.map(endpoint => '(' + endpointToString(endpoint) + ' path=' + endpoint.localityPath + ')')) + this.latestUpdates.set(childName, { + subchannelAddress: childAddresses, + lbConfig: childConfig.config, + ignoreReresolutionRequests: childConfig.ignore_reresolution_requests + }); + const existingChild = this.children.get(childName); + if (existingChild !== undefined) { + existingChild.updateAddressList( + childAddresses, + childConfig.config, + attributes + ); } } // Deactivate all children that are no longer in the priority list @@ -504,8 +497,8 @@ export class PriorityLoadBalancer implements LoadBalancer { child.deactivate(); } } - // Only report connecting if there are no existing children - this.tryNextPriority(this.children.size === 0); + this.updatesPaused = false; + this.choosePriority(); } exitIdle(): void { if (this.currentPriority !== null) { @@ -522,8 +515,6 @@ export class PriorityLoadBalancer implements LoadBalancer { child.destroy(); } this.children.clear(); - this.currentChildFromBeforeUpdate?.destroy(); - this.currentChildFromBeforeUpdate = null; } getTypeName(): string { return TYPE_NAME; diff --git a/packages/grpc-js-xds/src/load-balancer-ring-hash.ts b/packages/grpc-js-xds/src/load-balancer-ring-hash.ts new file mode 100644 index 000000000..a124d3b88 --- /dev/null +++ b/packages/grpc-js-xds/src/load-balancer-ring-hash.ts @@ -0,0 +1,507 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +import { experimental, logVerbosity, connectivityState, status, Metadata, ChannelOptions, LoadBalancingConfig } from '@grpc/grpc-js'; +import { isLocalityEndpoint } from './load-balancer-priority'; +import TypedLoadBalancingConfig = experimental.TypedLoadBalancingConfig; +import LeafLoadBalancer = experimental.LeafLoadBalancer; +import Endpoint = experimental.Endpoint; +import Picker = experimental.Picker; +import PickArgs = experimental.PickArgs; +import PickResult = experimental.PickResult; +import PickResultType = experimental.PickResultType; +import LoadBalancer = experimental.LoadBalancer; +import ChannelControlHelper = experimental.ChannelControlHelper; +import createChildChannelControlHelper = experimental.createChildChannelControlHelper; +import UnavailablePicker = experimental.UnavailablePicker; +import subchannelAddressToString = experimental.subchannelAddressToString; +import registerLoadBalancerType = experimental.registerLoadBalancerType; +import EndpointMap = experimental.EndpointMap; +import { loadXxhashApi, xxhashApi } from './xxhash'; +import { EXPERIMENTAL_RING_HASH } from './environment'; +import { loadProtosWithOptionsSync } from '@grpc/proto-loader/build/src/util'; +import { RingHash__Output } from './generated/envoy/extensions/load_balancing_policies/ring_hash/v3/RingHash'; +import { Any__Output } from './generated/google/protobuf/Any'; +import { TypedExtensionConfig__Output } from './generated/envoy/config/core/v3/TypedExtensionConfig'; +import { LoadBalancingPolicy__Output } from './generated/envoy/config/cluster/v3/LoadBalancingPolicy'; +import { registerLbPolicy } from './lb-policy-registry'; + +const TRACER_NAME = 'ring_hash'; + +function trace(text: string): void { + experimental.trace(logVerbosity.DEBUG, TRACER_NAME, text); +} + +const TYPE_NAME = 'ring_hash'; + +const DEFAULT_MIN_RING_SIZE = 1024; +const DEFAULT_MAX_RING_SIZE = 4096; +const ABSOLUTE_MAX_RING_SIZE = 8_388_608; +const DEFAULT_RING_SIZE_CAP = 4096; + +class RingHashLoadBalancingConfig implements TypedLoadBalancingConfig { + private minRingSize: number; + private maxRingSize: number; + constructor(minRingSize?: number, maxRingSize?: number) { + this.minRingSize = Math.min( + minRingSize ?? DEFAULT_MIN_RING_SIZE, + ABSOLUTE_MAX_RING_SIZE + ); + this.maxRingSize = Math.min( + maxRingSize ?? 
+      DEFAULT_MAX_RING_SIZE,
+      ABSOLUTE_MAX_RING_SIZE
+    );
+  }
+  getLoadBalancerName(): string {
+    return TYPE_NAME;
+  }
+  toJsonObject(): object {
+    return {
+      [TYPE_NAME]: {
+        min_ring_size: this.minRingSize,
+        max_ring_size: this.maxRingSize,
+      }
+    };
+  }
+  getMinRingSize() {
+    return this.minRingSize;
+  }
+  getMaxRingSize() {
+    return this.maxRingSize;
+  }
+  // eslint-disable-next-line @typescript-eslint/no-explicit-any
+  static createFromJson(obj: any): TypedLoadBalancingConfig {
+    if ('min_ring_size' in obj) {
+      if (typeof obj.min_ring_size === 'number') {
+        if (obj.min_ring_size > ABSOLUTE_MAX_RING_SIZE) {
+          throw new Error(`ring_hash config field min_ring_size exceeds the cap of ${ABSOLUTE_MAX_RING_SIZE}: ${obj.min_ring_size}`);
+        }
+      } else {
+        throw new Error(
+          'ring_hash config field min_ring_size must be a number if provided'
+        );
+      }
+    }
+    if ('max_ring_size' in obj) {
+      if (typeof obj.max_ring_size === 'number') {
+        if (obj.max_ring_size > ABSOLUTE_MAX_RING_SIZE) {
+          throw new Error(`ring_hash config field max_ring_size exceeds the cap of ${ABSOLUTE_MAX_RING_SIZE}: ${obj.max_ring_size}`);
+        }
+      } else {
+        throw new Error(
+          'ring_hash config field max_ring_size must be a number if provided'
+        );
+      }
+    }
+    return new RingHashLoadBalancingConfig(
+      obj.min_ring_size,
+      obj.max_ring_size
+    );
+  }
+}
+
+interface RingEntry {
+  leafBalancer: LeafLoadBalancer;
+  hash: bigint;
+}
+
+interface EndpointWeight {
+  endpoint: Endpoint;
+  weight: number;
+  normalizedWeight: number;
+}
+
+class RingHashPicker implements Picker {
+  constructor(private ring: RingEntry[]) {}
+  /**
+   * Find the least index in the ring with a hash greater than or equal to the
+   * hash parameter, or 0 if no such index exists.
+   * @param hash
+   */
+  private findIndexForHash(hash: bigint): number {
+    // Binary search to find the target index
+    let low = 0;
+    let high = this.ring.length;
+    let index = 0;
+    while (low <= high) {
+      /* Commonly in binary search, this operation can overflow and result in
+       * the wrong value. However, in this case the ring size is absolutely
+       * limited to 1<<23, so low+high < MAX_SAFE_INTEGER */
+      index = Math.floor((low + high) / 2);
+      if (index === this.ring.length) {
+        index = 0;
+        break;
+      }
+      const midval = this.ring[index].hash;
+      const midval1 = index === 0 ? 0n : this.ring[index - 1].hash;
+      if (hash <= midval && hash > midval1) {
+        break;
+      }
+      if (midval < hash) {
+        low = index + 1;
+      } else {
+        high = index - 1;
+      }
+      if (low > high) {
+        index = 0;
+        break;
+      }
+    }
+    return index;
+  }
+  pick(pickArgs: PickArgs): PickResult {
+    trace('Pick called. 
Hash=' + pickArgs.extraPickInfo.hash); + const firstIndex = this.findIndexForHash( + BigInt(pickArgs.extraPickInfo.hash) + ); + for (let i = 0; i < this.ring.length; i++) { + const index = (firstIndex + i) % this.ring.length; + const entryState = this.ring[index].leafBalancer.getConnectivityState(); + if (entryState === connectivityState.READY) { + return this.ring[index].leafBalancer.getPicker().pick(pickArgs); + } + if (entryState === connectivityState.IDLE) { + this.ring[index].leafBalancer.startConnecting(); + return { + pickResultType: PickResultType.QUEUE, + subchannel: null, + status: null, + onCallStarted: null, + onCallEnded: null, + }; + } + if (entryState === connectivityState.CONNECTING) { + return { + pickResultType: PickResultType.QUEUE, + subchannel: null, + status: null, + onCallStarted: null, + onCallEnded: null, + }; + } + } + return { + pickResultType: PickResultType.TRANSIENT_FAILURE, + status: { + code: status.UNAVAILABLE, + details: + 'ring_hash: invalid state: all child balancers in TRANSIENT_FAILURE', + metadata: new Metadata(), + }, + subchannel: null, + onCallStarted: null, + onCallEnded: null, + }; + } +} + +class RingHashLoadBalancer implements LoadBalancer { + /** + * Tracks endpoint repetition across address updates, to use an appropriate + * existing leaf load balancer for the same endpoint when possible. + */ + private leafMap = new EndpointMap(); + /** + * Tracks endpoints from a single address update, with their associated + * weights aggregated from all weights associated with that endpoint in that + * update. + */ + private leafWeightMap = new EndpointMap(); + private childChannelControlHelper: ChannelControlHelper; + private updatesPaused = false; + private currentState: connectivityState = connectivityState.IDLE; + private ring: RingEntry[] = []; + private ringHashSizeCap = DEFAULT_RING_SIZE_CAP; + constructor(private channelControlHelper: ChannelControlHelper, private options: ChannelOptions) { + this.childChannelControlHelper = createChildChannelControlHelper( + channelControlHelper, + { + updateState: (state, picker) => { + this.calculateAndUpdateState(); + /* If this LB policy is in the TRANSIENT_FAILURE state, requests will + * not trigger new connections, so we need to explicitly try connecting + * to other endpoints that are currently IDLE to try to eventually + * connect to something. 
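+           * The loop below stops as soon as it finds a leaf that is already
+           * CONNECTING, and otherwise starts a connection on at most one IDLE
+           * leaf per state change, so connection attempts are spread across
+           * successive updates rather than started all at once.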
*/ + if ( + state === connectivityState.TRANSIENT_FAILURE && + this.currentState === connectivityState.TRANSIENT_FAILURE + ) { + for (const leaf of this.leafMap.values()) { + const leafState = leaf.getConnectivityState(); + if (leafState === connectivityState.CONNECTING) { + break; + } + if (leafState === connectivityState.IDLE) { + leaf.startConnecting(); + break; + } + } + } + }, + } + ); + if (options['grpc.lb.ring_hash.ring_size_cap'] !== undefined) { + this.ringHashSizeCap = options['grpc.lb.ring_hash.ring_size_cap']; + } + } + + private calculateAndUpdateState() { + if (this.updatesPaused) { + return; + } + const stateCounts = { + [connectivityState.READY]: 0, + [connectivityState.TRANSIENT_FAILURE]: 0, + [connectivityState.CONNECTING]: 0, + [connectivityState.IDLE]: 0, + [connectivityState.SHUTDOWN]: 0, + }; + for (const leaf of this.leafMap.values()) { + stateCounts[leaf.getConnectivityState()] += 1; + } + if (stateCounts[connectivityState.READY] > 0) { + this.updateState(connectivityState.READY, new RingHashPicker(this.ring)); + // REPORT READY + } else if (stateCounts[connectivityState.TRANSIENT_FAILURE] > 1) { + this.updateState( + connectivityState.TRANSIENT_FAILURE, + new UnavailablePicker() + ); + } else if (stateCounts[connectivityState.CONNECTING] > 0) { + this.updateState( + connectivityState.CONNECTING, + new RingHashPicker(this.ring) + ); + } else if ( + stateCounts[connectivityState.TRANSIENT_FAILURE] > 0 && + this.leafMap.size > 1 + ) { + this.updateState( + connectivityState.CONNECTING, + new RingHashPicker(this.ring) + ); + } else if (stateCounts[connectivityState.IDLE] > 0) { + this.updateState(connectivityState.IDLE, new RingHashPicker(this.ring)); + } else { + this.updateState( + connectivityState.TRANSIENT_FAILURE, + new UnavailablePicker() + ); + } + } + + private updateState(newState: connectivityState, picker: Picker) { + trace( + connectivityState[this.currentState] + + ' -> ' + + connectivityState[newState] + ); + this.currentState = newState; + this.channelControlHelper.updateState(newState, picker); + } + + private constructRing( + endpointList: Endpoint[], + config: RingHashLoadBalancingConfig + ) { + this.ring = []; + const endpointWeights: EndpointWeight[] = []; + let weightSum = 0; + for (const endpoint of endpointList) { + const weight = this.leafWeightMap.get(endpoint) ?? 1; + endpointWeights.push({ endpoint, weight, normalizedWeight: 0 }); + weightSum += weight; + } + /* The normalized weights sum to 1, with some small potential error due to + * the limitation of floating point precision. */ + let minNormalizedWeight = 1; + for (const endpointWeight of endpointWeights) { + endpointWeight.normalizedWeight = endpointWeight.weight / weightSum; + minNormalizedWeight = Math.min( + endpointWeight.normalizedWeight, + minNormalizedWeight + ); + } + const minRingSize = Math.min(config.getMinRingSize(), this.ringHashSizeCap); + const maxRingSize = Math.min(config.getMaxRingSize(), this.ringHashSizeCap); + /* Calculate a scale factor that meets the following conditions: + * 1. The result is between minRingSize and maxRingSize, inclusive + * 2. The smallest normalized weight is scaled to a whole number, if it + * does not violate the previous condition. 
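+     * For example (illustrative): with two endpoints of weights 1 and 3, the
+     * normalized weights are 0.25 and 0.75, so with minRingSize 1024 the
+     * scale is ceil(0.25 * 1024) / 0.25 = 1024 and the endpoints receive 256
+     * and 768 of the resulting ring entries.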
+     * The size of the ring is ceil(scale)
+     */
+    const scale = Math.min(
+      Math.ceil(minNormalizedWeight * minRingSize) / minNormalizedWeight,
+      maxRingSize
+    );
+    trace('Creating a ring with size ' + Math.ceil(scale));
+    /* For each endpoint, create a number of entries proportional to its
+     * weight, such that the total number of entries is equal to ceil(scale).
+     */
+    let currentHashes = 0;
+    let targetHashes = 0;
+    for (const endpointWeight of endpointWeights) {
+      const addressString = subchannelAddressToString(
+        endpointWeight.endpoint.addresses[0]
+      );
+      targetHashes += scale * endpointWeight.normalizedWeight;
+      const leafBalancer = this.leafMap.get(endpointWeight.endpoint);
+      if (!leafBalancer) {
+        throw new Error(
+          'ring_hash: Invalid state: endpoint found in leafWeightMap but not in leafMap'
+        );
+      }
+      let count = 0;
+      while (currentHashes < targetHashes) {
+        const hashKey = `${addressString}_${count}`;
+        const hash = xxhashApi!.h64(hashKey, 0n);
+        this.ring.push({ hash, leafBalancer });
+        currentHashes++;
+        count++;
+      }
+    }
+    /* The ring is sorted by the hash so that it can be efficiently searched
+     * for a hash that is closest to any arbitrary hash. */
+    this.ring.sort((a, b) => {
+      if (a.hash > b.hash) {
+        return 1;
+      } else if (a.hash < b.hash) {
+        return -1;
+      } else {
+        return 0;
+      }
+    });
+  }
+
+  updateAddressList(
+    endpointList: Endpoint[],
+    lbConfig: TypedLoadBalancingConfig,
+    attributes: { [key: string]: unknown }
+  ): void {
+    if (!(lbConfig instanceof RingHashLoadBalancingConfig)) {
+      trace('Discarding address update with unrecognized config ' + JSON.stringify(lbConfig.toJsonObject(), undefined, 2));
+      return;
+    }
+    trace('Received update with config ' + JSON.stringify(lbConfig.toJsonObject(), undefined, 2));
+    this.updatesPaused = true;
+    this.leafWeightMap.clear();
+    const dedupedEndpointList: Endpoint[] = [];
+    for (const endpoint of endpointList) {
+      const leafBalancer = this.leafMap.get(endpoint);
+      if (leafBalancer) {
+        leafBalancer.updateEndpoint(endpoint);
+      } else {
+        this.leafMap.set(
+          endpoint,
+          new LeafLoadBalancer(endpoint, this.childChannelControlHelper, this.options)
+        );
+      }
+      const weight = this.leafWeightMap.get(endpoint);
+      if (weight === undefined) {
+        dedupedEndpointList.push(endpoint);
+      }
+      this.leafWeightMap.set(endpoint, (weight ?? 0) + (isLocalityEndpoint(endpoint) ? endpoint.endpointWeight : 1));
+    }
+    const removedLeaves = this.leafMap.deleteMissing(endpointList);
+    for (const leaf of removedLeaves) {
+      leaf.destroy();
+    }
+    loadXxhashApi().then(() => {
+      this.constructRing(dedupedEndpointList, lbConfig);
+      this.updatesPaused = false;
+      this.calculateAndUpdateState();
+    });
+  }
+  exitIdle(): void {
+    /* This operation does not make sense here. We don't want to make the whole
+     * balancer exit idle, and instead propagate that to individual children as
+     * relevant. 
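+     * Connections are instead initiated lazily: the picker starts connecting
+     * on an IDLE leaf when a pick reaches it, and the TRANSIENT_FAILURE
+     * handling in the child channel control helper covers the case where
+     * requests no longer trigger new connections.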
*/ + } + resetBackoff(): void { + // There is no backoff to reset here + } + destroy(): void { + this.ring = []; + for (const child of this.leafMap.values()) { + child.destroy(); + } + this.leafMap.clear(); + this.leafWeightMap.clear(); + } + getTypeName(): string { + return TYPE_NAME; + } +} + +const RING_HASH_TYPE_URL = 'type.googleapis.com/envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash'; + +const resourceRoot = loadProtosWithOptionsSync([ + 'envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.proto'], { + keepCase: true, + includeDirs: [ + // Paths are relative to src/build + __dirname + '/../../deps/envoy-api/', + __dirname + '/../../deps/xds/', + __dirname + '/../../deps/protoc-gen-validate' + ], + } +); + +const toObjectOptions = { + longs: String, + enums: String, + defaults: true, + oneofs: true +} + +function decodeRingHash(message: Any__Output): RingHash__Output { + const name = message.type_url.substring(message.type_url.lastIndexOf('/') + 1); + const type = resourceRoot.lookup(name); + if (type) { + const decodedMessage = (type as any).decode(message.value); + return decodedMessage.$type.toObject(decodedMessage, toObjectOptions) as RingHash__Output; + } else { + throw new Error(`TypedStruct parsing error: unexpected type URL ${message.type_url}`); + } +} + +function convertToLoadBalancingPolicy(protoPolicy: TypedExtensionConfig__Output, selectChildPolicy: (childPolicy: LoadBalancingPolicy__Output) => LoadBalancingConfig): LoadBalancingConfig { + if (protoPolicy.typed_config?.type_url !== RING_HASH_TYPE_URL) { + throw new Error(`Ring Hash LB policy parsing error: unexpected type URL ${protoPolicy.typed_config?.type_url}`); + } + const ringHashMessage = decodeRingHash(protoPolicy.typed_config); + if (ringHashMessage.hash_function !== 'XX_HASH') { + throw new Error(`Ring Hash LB policy parsing error: unexpected hash function ${ringHashMessage.hash_function}`); + } + return { + [TYPE_NAME]: { + min_ring_size: ringHashMessage.minimum_ring_size?.value ?? 1024, + max_ring_size: ringHashMessage.maximum_ring_size?.value ?? 
8_388_608 + } + }; +} + +export function setup() { + if (EXPERIMENTAL_RING_HASH) { + registerLoadBalancerType( + TYPE_NAME, + RingHashLoadBalancer, + RingHashLoadBalancingConfig + ); + registerLbPolicy(RING_HASH_TYPE_URL, convertToLoadBalancingPolicy); + } +} diff --git a/packages/grpc-js-xds/src/load-balancer-weighted-target.ts b/packages/grpc-js-xds/src/load-balancer-weighted-target.ts index 44a6acf11..89192b622 100644 --- a/packages/grpc-js-xds/src/load-balancer-weighted-target.ts +++ b/packages/grpc-js-xds/src/load-balancer-weighted-target.ts @@ -15,12 +15,11 @@ * */ -import { connectivityState as ConnectivityState, status as Status, Metadata, logVerbosity, experimental } from "@grpc/grpc-js"; -import { isLocalitySubchannelAddress, LocalitySubchannelAddress } from "./load-balancer-priority"; -import LoadBalancingConfig = experimental.LoadBalancingConfig; +import { connectivityState as ConnectivityState, status as Status, Metadata, logVerbosity, experimental, LoadBalancingConfig, ChannelOptions } from "@grpc/grpc-js"; +import { isLocalityEndpoint, LocalityEndpoint } from "./load-balancer-priority"; +import TypedLoadBalancingConfig = experimental.TypedLoadBalancingConfig; import LoadBalancer = experimental.LoadBalancer; import ChannelControlHelper = experimental.ChannelControlHelper; -import getFirstUsableConfig = experimental.getFirstUsableConfig; import registerLoadBalancerType = experimental.registerLoadBalancerType; import ChildLoadBalancerHandler = experimental.ChildLoadBalancerHandler; import Picker = experimental.Picker; @@ -28,9 +27,9 @@ import PickResult = experimental.PickResult; import PickArgs = experimental.PickArgs; import QueuePicker = experimental.QueuePicker; import UnavailablePicker = experimental.UnavailablePicker; -import SubchannelAddress = experimental.SubchannelAddress; -import subchannelAddressToString = experimental.subchannelAddressToString; -import validateLoadBalancingConfig = experimental.validateLoadBalancingConfig; +import Endpoint = experimental.Endpoint; +import endpointToString = experimental.endpointToString; +import selectLbConfigFromList = experimental.selectLbConfigFromList; const TRACER_NAME = 'weighted_target'; @@ -42,12 +41,30 @@ const TYPE_NAME = 'weighted_target'; const DEFAULT_RETENTION_INTERVAL_MS = 15 * 60 * 1000; - export interface WeightedTarget { +/** + * Type of the config for an individual child in the JSON representation of + * a weighted target LB policy config. + */ +export interface WeightedTargetRaw { weight: number; child_policy: LoadBalancingConfig[]; } -export class WeightedTargetLoadBalancingConfig implements LoadBalancingConfig { +/** + * The JSON representation of the config for the weighted target LB policy. 
The + * LoadBalancingConfig for a weighted target policy should have the form + * { weighted_target: WeightedTargetRawConfig } + */ +export interface WeightedTargetRawConfig { + targets: {[name: string]: WeightedTargetRaw }; +} + +interface WeightedTarget { + weight: number; + child_policy: TypedLoadBalancingConfig; +} + +class WeightedTargetLoadBalancingConfig implements TypedLoadBalancingConfig { getLoadBalancerName(): string { return TYPE_NAME; } @@ -64,7 +81,7 @@ export class WeightedTargetLoadBalancingConfig implements LoadBalancingConfig { for (const [targetName, targetValue] of this.targets.entries()) { targetsField[targetName] = { weight: targetValue.weight, - child_policy: targetValue.child_policy.map(policy => policy.toJsonObject()) + child_policy: [targetValue.child_policy.toJsonObject()] }; } return { @@ -79,7 +96,7 @@ export class WeightedTargetLoadBalancingConfig implements LoadBalancingConfig { if (!('targets' in obj && obj.targets !== null && typeof obj.targets === 'object')) { throw new Error('Weighted target config must have a targets map'); } - for (const key of obj.targets) { + for (const key of Object.keys(obj.targets)) { const targetObj = obj.targets[key]; if (!('weight' in targetObj && typeof targetObj.weight === 'number')) { throw new Error(`Weighted target ${key} must have a numeric weight`); @@ -87,9 +104,13 @@ export class WeightedTargetLoadBalancingConfig implements LoadBalancingConfig { if (!('child_policy' in targetObj && Array.isArray(targetObj.child_policy))) { throw new Error(`Weighted target ${key} must have a child_policy array`); } + const childConfig = selectLbConfigFromList(targetObj.child_policy); + if (!childConfig) { + throw new Error(`Weighted target ${key} config parsing failed`); + } const validatedTarget: WeightedTarget = { weight: targetObj.weight, - child_policy: targetObj.child_policy.map(validateLoadBalancingConfig) + child_policy: childConfig } targetsMap.set(key, validatedTarget); } @@ -119,36 +140,21 @@ class WeightedTargetPicker implements Picker { pick(pickArgs: PickArgs): PickResult { // num | 0 is equivalent to floor(num) const selection = (Math.random() * this.rangeTotal) | 0; - - /* Binary search for the element of the list such that - * pickerList[index - 1].rangeEnd <= selection < pickerList[index].rangeEnd - */ - let mid = 0; - let startIndex = 0; - let endIndex = this.pickerList.length - 1; - let index = 0; - while (endIndex > startIndex) { - mid = ((startIndex + endIndex) / 2) | 0; - if (this.pickerList[mid].rangeEnd > selection) { - endIndex = mid; - } else if (this.pickerList[mid].rangeEnd < selection) { - startIndex = mid + 1; - } else { - // + 1 here because the range is exclusive at the top end - index = mid + 1; - break; + + for (const entry of this.pickerList) { + if (selection < entry.rangeEnd) { + return entry.picker.pick(pickArgs); } } - if (index === 0) { - index = startIndex; - } - return this.pickerList[index].picker.pick(pickArgs); + /* Default to first element if the iteration doesn't find anything for some + * reason. 
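+     * (One way this can happen, assuming each rangeEnd is a cumulative
+     * weight: if every target weight is zero, the selection of 0 is never
+     * less than any rangeEnd.)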
*/ + return this.pickerList[0].picker.pick(pickArgs); } } interface WeightedChild { - updateAddressList(addressList: SubchannelAddress[], lbConfig: WeightedTarget, attributes: { [key: string]: unknown; }): void; + updateAddressList(endpointList: Endpoint[], lbConfig: WeightedTarget, attributes: { [key: string]: unknown; }): void; exitIdle(): void; resetBackoff(): void; destroy(): void; @@ -168,17 +174,11 @@ export class WeightedTargetLoadBalancer implements LoadBalancer { private weight: number = 0; constructor(private parent: WeightedTargetLoadBalancer, private name: string) { - this.childBalancer = new ChildLoadBalancerHandler({ - createSubchannel: (subchannelAddress, subchannelOptions) => { - return this.parent.channelControlHelper.createSubchannel(subchannelAddress, subchannelOptions); - }, - updateState: (connectivityState, picker) => { + this.childBalancer = new ChildLoadBalancerHandler(experimental.createChildChannelControlHelper(this.parent.channelControlHelper, { + updateState: (connectivityState: ConnectivityState, picker: Picker) => { this.updateState(connectivityState, picker); }, - requestReresolution: () => { - this.parent.channelControlHelper.requestReresolution(); - } - }); + }), parent.options); this.picker = new QueuePicker(this.childBalancer); } @@ -187,15 +187,12 @@ export class WeightedTargetLoadBalancer implements LoadBalancer { trace('Target ' + this.name + ' ' + ConnectivityState[this.connectivityState] + ' -> ' + ConnectivityState[connectivityState]); this.connectivityState = connectivityState; this.picker = picker; - this.parent.updateState(); + this.parent.maybeUpdateState(); } - updateAddressList(addressList: SubchannelAddress[], lbConfig: WeightedTarget, attributes: { [key: string]: unknown; }): void { + updateAddressList(endpointList: Endpoint[], lbConfig: WeightedTarget, attributes: { [key: string]: unknown; }): void { this.weight = lbConfig.weight; - const childConfig = getFirstUsableConfig(lbConfig.child_policy); - if (childConfig !== null) { - this.childBalancer.updateAddressList(addressList, childConfig, attributes); - } + this.childBalancer.updateAddressList(endpointList, lbConfig.child_policy, attributes); } exitIdle(): void { this.childBalancer.exitIdle(); @@ -244,8 +241,15 @@ export class WeightedTargetLoadBalancer implements LoadBalancer { * List of current target names. */ private targetList: string[] = []; + private updatesPaused = false; - constructor(private channelControlHelper: ChannelControlHelper) {} + constructor(private channelControlHelper: ChannelControlHelper, private options: ChannelOptions) {} + + private maybeUpdateState() { + if (!this.updatesPaused) { + this.updateState() + } + } private updateState() { const pickerList: WeightedPicker[] = []; @@ -315,7 +319,7 @@ export class WeightedTargetLoadBalancer implements LoadBalancer { this.channelControlHelper.updateState(connectivityState, picker); } - updateAddressList(addressList: SubchannelAddress[], lbConfig: LoadBalancingConfig, attributes: { [key: string]: unknown; }): void { + updateAddressList(addressList: Endpoint[], lbConfig: TypedLoadBalancingConfig, attributes: { [key: string]: unknown; }): void { if (!(lbConfig instanceof WeightedTargetLoadBalancingConfig)) { // Reject a config of the wrong type trace('Discarding address list update with unrecognized config ' + JSON.stringify(lbConfig.toJsonObject(), undefined, 2)); @@ -326,9 +330,9 @@ export class WeightedTargetLoadBalancer implements LoadBalancer { * which child it belongs to. 
So we bucket those addresses by that first * element, and pass along the rest of the localityPath for that child * to use. */ - const childAddressMap = new Map(); + const childEndpointMap = new Map(); for (const address of addressList) { - if (!isLocalitySubchannelAddress(address)) { + if (!isLocalityEndpoint(address)) { // Reject address that cannot be associated with targets return; } @@ -337,18 +341,19 @@ export class WeightedTargetLoadBalancer implements LoadBalancer { return; } const childName = address.localityPath[0]; - const childAddress: LocalitySubchannelAddress = { + const childAddress: LocalityEndpoint = { ...address, localityPath: address.localityPath.slice(1), }; - let childAddressList = childAddressMap.get(childName); + let childAddressList = childEndpointMap.get(childName); if (childAddressList === undefined) { childAddressList = []; - childAddressMap.set(childName, childAddressList); + childEndpointMap.set(childName, childAddressList); } childAddressList.push(childAddress); } + this.updatesPaused = true; this.targetList = Array.from(lbConfig.getTargets().keys()); for (const [targetName, targetConfig] of lbConfig.getTargets()) { let target = this.targets.get(targetName); @@ -358,9 +363,9 @@ export class WeightedTargetLoadBalancer implements LoadBalancer { } else { target.maybeReactivate(); } - const targetAddresses = childAddressMap.get(targetName) ?? []; - trace('Assigning target ' + targetName + ' address list ' + targetAddresses.map(address => '(' + subchannelAddressToString(address) + ' path=' + address.localityPath + ')')); - target.updateAddressList(targetAddresses, targetConfig, attributes); + const targetEndpoints = childEndpointMap.get(targetName) ?? []; + trace('Assigning target ' + targetName + ' address list ' + targetEndpoints.map(endpoint => '(' + endpointToString(endpoint) + ' path=' + endpoint.localityPath + ')')); + target.updateAddressList(targetEndpoints, targetConfig, attributes); } // Deactivate targets that are not in the new config @@ -370,6 +375,7 @@ export class WeightedTargetLoadBalancer implements LoadBalancer { target.deactivate(); } } + this.updatesPaused = false; this.updateState(); } @@ -396,4 +402,4 @@ export class WeightedTargetLoadBalancer implements LoadBalancer { export function setup() { registerLoadBalancerType(TYPE_NAME, WeightedTargetLoadBalancer, WeightedTargetLoadBalancingConfig); -} \ No newline at end of file +} diff --git a/packages/grpc-js-xds/src/load-balancer-xds-cluster-impl.ts b/packages/grpc-js-xds/src/load-balancer-xds-cluster-impl.ts new file mode 100644 index 000000000..f163b6fe2 --- /dev/null +++ b/packages/grpc-js-xds/src/load-balancer-xds-cluster-impl.ts @@ -0,0 +1,330 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +import { experimental, logVerbosity, status as Status, Metadata, connectivityState, ChannelOptions } from "@grpc/grpc-js"; +import { validateXdsServerConfig, XdsServerConfig } from "./xds-bootstrap"; +import { getSingletonXdsClient, XdsClient, XdsClusterDropStats, XdsClusterLocalityStats } from "./xds-client"; +import { LocalityEndpoint } from "./load-balancer-priority"; + +import LoadBalancer = experimental.LoadBalancer; +import registerLoadBalancerType = experimental.registerLoadBalancerType; +import Endpoint = experimental.Endpoint; +import endpointHasAddress = experimental.endpointHasAddress; +import subchannelAddressToString = experimental.subchannelAddressToString; +import Picker = experimental.Picker; +import PickArgs = experimental.PickArgs; +import PickResult = experimental.PickResult; +import PickResultType = experimental.PickResultType; +import ChannelControlHelper = experimental.ChannelControlHelper; +import ChildLoadBalancerHandler = experimental.ChildLoadBalancerHandler; +import createChildChannelControlHelper = experimental.createChildChannelControlHelper; +import TypedLoadBalancingConfig = experimental.TypedLoadBalancingConfig; +import selectLbConfigFromList = experimental.selectLbConfigFromList; +import SubchannelInterface = experimental.SubchannelInterface; +import BaseSubchannelWrapper = experimental.BaseSubchannelWrapper; +import { Locality__Output } from "./generated/envoy/config/core/v3/Locality"; + +const TRACER_NAME = 'xds_cluster_impl'; + +function trace(text: string): void { + experimental.trace(logVerbosity.DEBUG, TRACER_NAME, text); +} + +const TYPE_NAME = 'xds_cluster_impl'; + +const DEFAULT_MAX_CONCURRENT_REQUESTS = 1024; + +export interface DropCategory { + category: string; + requests_per_million: number; +} + +function validateDropCategory(obj: any): DropCategory { + if (!('category' in obj && typeof obj.category === 'string')) { + throw new Error('xds_cluster_impl config drop_categories entry must have a string field category'); + } + if (!('requests_per_million' in obj && typeof obj.requests_per_million === 'number')) { + throw new Error('xds_cluster_impl config drop_categories entry must have a number field requests_per_million'); + } + return obj; +} + +class XdsClusterImplLoadBalancingConfig implements TypedLoadBalancingConfig { + private maxConcurrentRequests: number; + getLoadBalancerName(): string { + return TYPE_NAME; + } + toJsonObject(): object { + const jsonObj: {[key: string]: any} = { + cluster: this.cluster, + drop_categories: this.dropCategories, + child_policy: [this.childPolicy.toJsonObject()], + max_concurrent_requests: this.maxConcurrentRequests, + eds_service_name: this.edsServiceName, + lrs_load_reporting_server: this.lrsLoadReportingServer, + }; + return { + [TYPE_NAME]: jsonObj + }; + } + + constructor(private cluster: string, private dropCategories: DropCategory[], private childPolicy: TypedLoadBalancingConfig, private edsServiceName: string, private lrsLoadReportingServer?: XdsServerConfig, maxConcurrentRequests?: number) { + this.maxConcurrentRequests = maxConcurrentRequests ?? 
DEFAULT_MAX_CONCURRENT_REQUESTS; + } + + getCluster() { + return this.cluster; + } + + getEdsServiceName() { + return this.edsServiceName; + } + + getLrsLoadReportingServer() { + return this.lrsLoadReportingServer; + } + + getMaxConcurrentRequests() { + return this.maxConcurrentRequests; + } + + getDropCategories() { + return this.dropCategories; + } + + getChildPolicy() { + return this.childPolicy; + } + + static createFromJson(obj: any): XdsClusterImplLoadBalancingConfig { + if (!('cluster' in obj && typeof obj.cluster === 'string')) { + throw new Error('xds_cluster_impl config must have a string field cluster'); + } + if (!('eds_service_name' in obj && typeof obj.eds_service_name === 'string')) { + throw new Error('xds_cluster_impl config must have a string field eds_service_name'); + } + if ('max_concurrent_requests' in obj && !(obj.max_concurrent_requests === undefined || typeof obj.max_concurrent_requests === 'number')) { + throw new Error('xds_cluster_impl config max_concurrent_requests must be a number if provided'); + } + if (!('drop_categories' in obj && Array.isArray(obj.drop_categories))) { + throw new Error('xds_cluster_impl config must have an array field drop_categories'); + } + if (!('child_policy' in obj && Array.isArray(obj.child_policy))) { + throw new Error('xds_cluster_impl config must have an array field child_policy'); + } + const childConfig = selectLbConfigFromList(obj.child_policy); + if (!childConfig) { + throw new Error('xds_cluster_impl config child_policy parsing failed'); + } + let lrsServer: XdsServerConfig | undefined = undefined; + if (obj.lrs_load_reporting_server) { + lrsServer = validateXdsServerConfig(obj.lrs_load_reporting_server) + } + return new XdsClusterImplLoadBalancingConfig(obj.cluster, obj.drop_categories.map(validateDropCategory), childConfig, obj.eds_service_name, lrsServer, obj.max_concurrent_requests); + } +} + +class CallCounterMap { + private callCounters = new Map(); + + startCall(key: string) { + const currentValue = this.callCounters.get(key) ?? 0; + this.callCounters.set(key, currentValue + 1); + } + + endCall(key: string) { + const currentValue = this.callCounters.get(key) ?? 0; + if (currentValue - 1 <= 0) { + this.callCounters.delete(key); + } else { + this.callCounters.set(key, currentValue - 1); + } + } + + getConcurrentRequests(key: string) { + return this.callCounters.get(key) ?? 0; + } +} + +const callCounterMap = new CallCounterMap(); + +class LocalitySubchannelWrapper extends BaseSubchannelWrapper implements SubchannelInterface { + constructor(child: SubchannelInterface, private statsObject: XdsClusterLocalityStats | null) { + super(child); + } + + getStatsObject() { + return this.statsObject; + } + + getWrappedSubchannel(): SubchannelInterface { + return this.child; + } +} + +/** + * This picker is responsible for implementing the drop configuration, and for + * recording drop stats and per-locality stats. 
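+ * For example (illustrative): a drop category with requests_per_million set
+ * to 50_000 drops roughly 5% of picks, and any pick attempted while the
+ * cluster already has max_concurrent_requests calls in flight is dropped and
+ * recorded as an uncategorized drop.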
+ */ +class XdsClusterImplPicker implements Picker { + constructor(private originalPicker: Picker, private callCounterMapKey: string, private maxConcurrentRequests: number, private dropCategories: DropCategory[], private clusterDropStats: XdsClusterDropStats | null) {} + + private checkForMaxConcurrentRequestsDrop(): boolean { + return callCounterMap.getConcurrentRequests(this.callCounterMapKey) >= this.maxConcurrentRequests; + } + + private checkForDrop(): string | null { + for (const dropCategory of this.dropCategories) { + if (Math.random() * 1_000_000 < dropCategory.requests_per_million) { + return dropCategory.category; + } + } + return null; + } + + pick(pickArgs: PickArgs): PickResult { + let details: string | null = null; + if (this.checkForMaxConcurrentRequestsDrop()) { + details = 'Call dropped by load balancing policy.'; + this.clusterDropStats?.addUncategorizedCallDropped(); + } else { + const category = this.checkForDrop(); + if (category !== null) { + details = `Call dropped by load balancing policy. Category: ${category}`; + this.clusterDropStats?.addCallDropped(category); + } + } + if (details === null) { + const originalPick = this.originalPicker.pick(pickArgs); + const pickSubchannel = originalPick.subchannel ? (originalPick.subchannel as LocalitySubchannelWrapper) : null; + return { + pickResultType: originalPick.pickResultType, + status: originalPick.status, + subchannel: pickSubchannel?.getWrappedSubchannel() ?? null, + onCallStarted: () => { + originalPick.onCallStarted?.(); + pickSubchannel?.getStatsObject()?.addCallStarted(); + callCounterMap.startCall(this.callCounterMapKey); + }, + onCallEnded: status => { + originalPick.onCallEnded?.(status); + pickSubchannel?.getStatsObject()?.addCallFinished(status !== Status.OK) + callCounterMap.endCall(this.callCounterMapKey); + } + }; + } else { + return { + pickResultType: PickResultType.DROP, + status: { + code: Status.UNAVAILABLE, + details: details, + metadata: new Metadata(), + }, + subchannel: null, + onCallEnded: null, + onCallStarted: null + }; + } + } +} + +function getCallCounterMapKey(cluster: string, edsServiceName?: string): string { + return `{${cluster},${edsServiceName ?? 
''}}`; +} + +class XdsClusterImplBalancer implements LoadBalancer { + private childBalancer: ChildLoadBalancerHandler; + private lastestEndpointList: Endpoint[] | null = null; + private latestConfig: XdsClusterImplLoadBalancingConfig | null = null; + private clusterDropStats: XdsClusterDropStats | null = null; + private xdsClient: XdsClient | null = null; + + constructor(private readonly channelControlHelper: ChannelControlHelper, options: ChannelOptions) { + this.childBalancer = new ChildLoadBalancerHandler(createChildChannelControlHelper(channelControlHelper, { + createSubchannel: (subchannelAddress, subchannelArgs) => { + if (!this.xdsClient || !this.latestConfig || !this.lastestEndpointList) { + throw new Error('xds_cluster_impl: invalid state: createSubchannel called with xdsClient or latestConfig not populated'); + } + const wrapperChild = channelControlHelper.createSubchannel(subchannelAddress, subchannelArgs); + let locality: Locality__Output | null = null; + for (const endpoint of this.lastestEndpointList) { + if (endpointHasAddress(endpoint, subchannelAddress)) { + locality = (endpoint as LocalityEndpoint).locality; + } + } + if (locality === null) { + trace('Not reporting load for address ' + subchannelAddressToString(subchannelAddress) + ' because it has unknown locality.'); + return wrapperChild; + } + const lrsServer = this.latestConfig.getLrsLoadReportingServer(); + let statsObj: XdsClusterLocalityStats | null = null; + if (lrsServer) { + statsObj = this.xdsClient.addClusterLocalityStats( + lrsServer, + this.latestConfig.getCluster(), + this.latestConfig.getEdsServiceName(), + locality + ); + } + return new LocalitySubchannelWrapper(wrapperChild, statsObj); + }, + updateState: (connectivityState, originalPicker) => { + if (this.latestConfig === null) { + channelControlHelper.updateState(connectivityState, originalPicker); + } else { + const picker = new XdsClusterImplPicker(originalPicker, getCallCounterMapKey(this.latestConfig.getCluster(), this.latestConfig.getEdsServiceName()), this.latestConfig.getMaxConcurrentRequests(), this.latestConfig.getDropCategories(), this.clusterDropStats); + channelControlHelper.updateState(connectivityState, picker); + } + } + }), options); + } + updateAddressList(endpointList: Endpoint[], lbConfig: TypedLoadBalancingConfig, attributes: { [key: string]: unknown; }): void { + if (!(lbConfig instanceof XdsClusterImplLoadBalancingConfig)) { + trace('Discarding address list update with unrecognized config ' + JSON.stringify(lbConfig.toJsonObject(), undefined, 2)); + return; + } + trace('Received update with config: ' + JSON.stringify(lbConfig, undefined, 2)); + this.lastestEndpointList = endpointList; + this.latestConfig = lbConfig; + this.xdsClient = attributes.xdsClient as XdsClient; + if (lbConfig.getLrsLoadReportingServer()) { + this.clusterDropStats = this.xdsClient.addClusterDropStats( + lbConfig.getLrsLoadReportingServer()!, + lbConfig.getCluster(), + lbConfig.getEdsServiceName() ?? 
'' + ); + } + + this.childBalancer.updateAddressList(endpointList, lbConfig.getChildPolicy(), attributes); + } + exitIdle(): void { + this.childBalancer.exitIdle(); + } + resetBackoff(): void { + this.childBalancer.resetBackoff(); + } + destroy(): void { + this.childBalancer.destroy(); + } + getTypeName(): string { + return TYPE_NAME; + } +} + +export function setup() { + registerLoadBalancerType(TYPE_NAME, XdsClusterImplBalancer, XdsClusterImplLoadBalancingConfig); +} diff --git a/packages/grpc-js-xds/src/load-balancer-xds-cluster-manager.ts b/packages/grpc-js-xds/src/load-balancer-xds-cluster-manager.ts index 920a43db9..99059dcac 100644 --- a/packages/grpc-js-xds/src/load-balancer-xds-cluster-manager.ts +++ b/packages/grpc-js-xds/src/load-balancer-xds-cluster-manager.ts @@ -15,10 +15,9 @@ * */ -import { connectivityState as ConnectivityState, status as Status, experimental, logVerbosity, Metadata, status } from "@grpc/grpc-js/"; +import { connectivityState as ConnectivityState, status as Status, experimental, logVerbosity, Metadata, status, ChannelOptions } from "@grpc/grpc-js/"; -import LoadBalancingConfig = experimental.LoadBalancingConfig; -import validateLoadBalancingConfig = experimental.validateLoadBalancingConfig; +import TypedLoadBalancingConfig = experimental.TypedLoadBalancingConfig; import LoadBalancer = experimental.LoadBalancer; import Picker = experimental.Picker; import PickResult = experimental.PickResult; @@ -26,10 +25,10 @@ import PickArgs = experimental.PickArgs; import PickResultType = experimental.PickResultType; import UnavailablePicker = experimental.UnavailablePicker; import QueuePicker = experimental.QueuePicker; -import SubchannelAddress = experimental.SubchannelAddress; +import Endpoint = experimental.Endpoint; import ChildLoadBalancerHandler = experimental.ChildLoadBalancerHandler; -import getFirstUsableConfig = experimental.getFirstUsableConfig; import ChannelControlHelper = experimental.ChannelControlHelper; +import selectLbConfigFromList = experimental.selectLbConfigFromList; import registerLoadBalancerType = experimental.registerLoadBalancerType; const TRACER_NAME = 'xds_cluster_manager'; @@ -40,16 +39,12 @@ function trace(text: string): void { const TYPE_NAME = 'xds_cluster_manager'; -interface ClusterManagerChild { - child_policy: LoadBalancingConfig[]; -} - -export class XdsClusterManagerLoadBalancingConfig implements LoadBalancingConfig { +class XdsClusterManagerLoadBalancingConfig implements TypedLoadBalancingConfig { getLoadBalancerName(): string { return TYPE_NAME; } - constructor(private children: Map) {} + constructor(private children: Map) {} getChildren() { return this.children; @@ -57,9 +52,9 @@ export class XdsClusterManagerLoadBalancingConfig implements LoadBalancingConfig toJsonObject(): object { const childrenField: {[key: string]: object} = {}; - for (const [childName, childValue] of this.children.entries()) { + for (const [childName, childPolicy] of this.children.entries()) { childrenField[childName] = { - child_policy: childValue.child_policy.map(policy => policy.toJsonObject()) + child_policy: [childPolicy.toJsonObject()] }; } return { @@ -70,19 +65,20 @@ export class XdsClusterManagerLoadBalancingConfig implements LoadBalancingConfig } static createFromJson(obj: any): XdsClusterManagerLoadBalancingConfig { - const childrenMap: Map = new Map(); + const childrenMap: Map = new Map(); if (!('children' in obj && obj.children !== null && typeof obj.children === 'object')) { throw new Error('xds_cluster_manager config must have a children 
map'); } - for (const key of obj.children) { + for (const key of Object.keys(obj.children)) { const childObj = obj.children[key]; if (!('child_policy' in childObj && Array.isArray(childObj.child_policy))) { throw new Error(`xds_cluster_manager child ${key} must have a child_policy array`); } - const validatedChild = { - child_policy: childObj.child_policy.map(validateLoadBalancingConfig) - }; - childrenMap.set(key, validatedChild); + const childPolicy = selectLbConfigFromList(childObj.child_policy); + if (childPolicy === null) { + throw new Error(`xds_cluster_mananger child ${key} has no recognized sucessfully parsed child_policy`); + } + childrenMap.set(key, childPolicy); } return new XdsClusterManagerLoadBalancingConfig(childrenMap); } @@ -107,15 +103,15 @@ class XdsClusterManagerPicker implements Picker { metadata: new Metadata(), }, subchannel: null, - extraFilterFactory: null, - onCallStarted: null + onCallStarted: null, + onCallEnded: null }; } } } interface XdsClusterManagerChild { - updateAddressList(addressList: SubchannelAddress[], lbConfig: ClusterManagerChild, attributes: { [key: string]: unknown; }): void; + updateAddressList(endpointList: Endpoint[], childConfig: TypedLoadBalancingConfig, attributes: { [key: string]: unknown; }): void; exitIdle(): void; resetBackoff(): void; destroy(): void; @@ -131,17 +127,11 @@ class XdsClusterManager implements LoadBalancer { private childBalancer: ChildLoadBalancerHandler; constructor(private parent: XdsClusterManager, private name: string) { - this.childBalancer = new ChildLoadBalancerHandler({ - createSubchannel: (subchannelAddress, subchannelOptions) => { - return this.parent.channelControlHelper.createSubchannel(subchannelAddress, subchannelOptions); - }, - updateState: (connectivityState, picker) => { + this.childBalancer = new ChildLoadBalancerHandler(experimental.createChildChannelControlHelper(this.parent.channelControlHelper, { + updateState: (connectivityState: ConnectivityState, picker: Picker) => { this.updateState(connectivityState, picker); }, - requestReresolution: () => { - this.parent.channelControlHelper.requestReresolution(); - } - }); + }), parent.options); this.picker = new QueuePicker(this.childBalancer); } @@ -150,13 +140,10 @@ class XdsClusterManager implements LoadBalancer { trace('Child ' + this.name + ' ' + ConnectivityState[this.connectivityState] + ' -> ' + ConnectivityState[connectivityState]); this.connectivityState = connectivityState; this.picker = picker; - this.parent.updateState(); + this.parent.maybeUpdateState(); } - updateAddressList(addressList: SubchannelAddress[], lbConfig: ClusterManagerChild, attributes: { [key: string]: unknown; }): void { - const childConfig = getFirstUsableConfig(lbConfig.child_policy); - if (childConfig !== null) { - this.childBalancer.updateAddressList(addressList, childConfig, attributes); - } + updateAddressList(endpointList: Endpoint[], childConfig: TypedLoadBalancingConfig, attributes: { [key: string]: unknown; }): void { + this.childBalancer.updateAddressList(endpointList, childConfig, attributes); } exitIdle(): void { this.childBalancer.exitIdle(); @@ -179,7 +166,14 @@ class XdsClusterManager implements LoadBalancer { private children: Map = new Map(); // Shutdown is a placeholder value that will never appear in normal operation. 
private currentState: ConnectivityState = ConnectivityState.SHUTDOWN; - constructor(private channelControlHelper: ChannelControlHelper) {} + private updatesPaused = false; + constructor(private channelControlHelper: ChannelControlHelper, private options: ChannelOptions) {} + + private maybeUpdateState() { + if (!this.updatesPaused) { + this.updateState(); + } + } private updateState() { const pickerMap: Map = new Map(); @@ -210,38 +204,10 @@ class XdsClusterManager implements LoadBalancer { } else { connectivityState = ConnectivityState.TRANSIENT_FAILURE; } - /* For each of the states CONNECTING, IDLE, and TRANSIENT_FAILURE, there is - * exactly one corresponding picker, so if the state is one of those and - * that does not change, no new information is provided by passing the - * new state upward. */ - if (connectivityState === this.currentState && connectivityState !== ConnectivityState.READY) { - return; - } - let picker: Picker; - - switch (connectivityState) { - case ConnectivityState.READY: - picker = new XdsClusterManagerPicker(pickerMap); - break; - case ConnectivityState.CONNECTING: - case ConnectivityState.IDLE: - picker = new QueuePicker(this); - break; - default: - picker = new UnavailablePicker({ - code: Status.UNAVAILABLE, - details: 'xds_cluster_manager: all children report state TRANSIENT_FAILURE', - metadata: new Metadata() - }); - } - trace( - 'Transitioning to ' + - ConnectivityState[connectivityState] - ); - this.channelControlHelper.updateState(connectivityState, picker); + this.channelControlHelper.updateState(connectivityState, new XdsClusterManagerPicker(pickerMap)); } - - updateAddressList(addressList: SubchannelAddress[], lbConfig: LoadBalancingConfig, attributes: { [key: string]: unknown; }): void { + + updateAddressList(endpointList: Endpoint[], lbConfig: TypedLoadBalancingConfig, attributes: { [key: string]: unknown; }): void { if (!(lbConfig instanceof XdsClusterManagerLoadBalancingConfig)) { // Reject a config of the wrong type trace('Discarding address list update with unrecognized config ' + JSON.stringify(lbConfig.toJsonObject(), undefined, 2)); @@ -256,6 +222,7 @@ class XdsClusterManager implements LoadBalancer { namesToRemove.push(name); } } + this.updatesPaused = true; for (const name of namesToRemove) { this.children.get(name)!.destroy(); this.children.delete(name); @@ -264,10 +231,11 @@ class XdsClusterManager implements LoadBalancer { for (const [name, childConfig] of configChildren.entries()) { if (!this.children.has(name)) { const newChild = new this.XdsClusterManagerChildImpl(this, name); - newChild.updateAddressList(addressList, childConfig, attributes); + newChild.updateAddressList(endpointList, childConfig, attributes); this.children.set(name, newChild); } } + this.updatesPaused = false; this.updateState(); } exitIdle(): void { @@ -293,4 +261,4 @@ class XdsClusterManager implements LoadBalancer { export function setup() { registerLoadBalancerType(TYPE_NAME, XdsClusterManager, XdsClusterManagerLoadBalancingConfig); -} \ No newline at end of file +} diff --git a/packages/grpc-js-xds/src/load-balancer-xds-cluster-resolver.ts b/packages/grpc-js-xds/src/load-balancer-xds-cluster-resolver.ts new file mode 100644 index 000000000..29c0b6f31 --- /dev/null +++ b/packages/grpc-js-xds/src/load-balancer-xds-cluster-resolver.ts @@ -0,0 +1,499 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import { ChannelOptions, LoadBalancingConfig, Metadata, connectivityState, experimental, logVerbosity, status } from "@grpc/grpc-js"; +import { registerLoadBalancerType } from "@grpc/grpc-js/build/src/load-balancer"; +import { EXPERIMENTAL_OUTLIER_DETECTION } from "./environment"; +import { Locality__Output } from "./generated/envoy/config/core/v3/Locality"; +import { ClusterLoadAssignment__Output } from "./generated/envoy/config/endpoint/v3/ClusterLoadAssignment"; +import { LocalityEndpoint, PriorityChildRaw } from "./load-balancer-priority"; +import { getSingletonXdsClient, Watcher, XdsClient } from "./xds-client"; +import { DropCategory } from "./load-balancer-xds-cluster-impl"; + +import TypedLoadBalancingConfig = experimental.TypedLoadBalancingConfig; +import LoadBalancer = experimental.LoadBalancer; +import Resolver = experimental.Resolver; +import SubchannelAddress = experimental.SubchannelAddress; +import Endpoint = experimental.Endpoint; +import ChildLoadBalancerHandler = experimental.ChildLoadBalancerHandler; +import createResolver = experimental.createResolver; +import ChannelControlHelper = experimental.ChannelControlHelper; +import OutlierDetectionRawConfig = experimental.OutlierDetectionRawConfig; +import subchannelAddressToString = experimental.subchannelAddressToString; +import endpointToString = experimental.endpointToString; +import selectLbConfigFromList = experimental.selectLbConfigFromList; +import parseLoadBalancingConfig = experimental.parseLoadBalancingConfig; +import UnavailablePicker = experimental.UnavailablePicker; +import { serverConfigEqual, validateXdsServerConfig, XdsServerConfig } from "./xds-bootstrap"; +import { EndpointResourceType } from "./xds-resource-type/endpoint-resource-type"; + +const TRACER_NAME = 'xds_cluster_resolver'; + +function trace(text: string): void { + experimental.trace(logVerbosity.DEBUG, TRACER_NAME, text); +} + +export interface DiscoveryMechanism { + cluster: string; + lrs_load_reporting_server?: XdsServerConfig; + max_concurrent_requests?: number; + type: 'EDS' | 'LOGICAL_DNS'; + eds_service_name?: string; + dns_hostname?: string; + outlier_detection?: OutlierDetectionRawConfig; +} + +function validateDiscoveryMechanism(obj: any): DiscoveryMechanism { + if (!('cluster' in obj && typeof obj.cluster === 'string')) { + throw new Error('discovery_mechanisms entry must have a string field cluster'); + } + if (!('type' in obj && (obj.type === 'EDS' || obj.type === 'LOGICAL_DNS'))) { + throw new Error('discovery_mechanisms entry must have a field "type" with the value "EDS" or "LOGICAL_DNS"'); + } + if ('max_concurrent_requests' in obj && obj.max_concurrent_requests !== undefined && typeof obj.max_concurrent_requests !== "number") { + throw new Error('discovery_mechanisms entry max_concurrent_requests field must be a number if provided'); + } + if ('eds_service_name' in obj && obj.eds_service_name !== undefined && typeof obj.eds_service_name !== 'string') { + throw new Error('discovery_mechanisms entry eds_service_name field must be a string if provided'); + } + if ('dns_hostname' in obj && obj.dns_hostname 
!== undefined && typeof obj.dns_hostname !== 'string') { + throw new Error('discovery_mechanisms entry dns_hostname field must be a string if provided'); + } + return {...obj, lrs_load_reporting_server: obj.lrs_load_reporting_server ? validateXdsServerConfig(obj.lrs_load_reporting_server) : undefined}; +} + +const TYPE_NAME = 'xds_cluster_resolver'; + +class XdsClusterResolverLoadBalancingConfig implements TypedLoadBalancingConfig { + getLoadBalancerName(): string { + return TYPE_NAME; + } + toJsonObject(): object { + return { + [TYPE_NAME]: { + discovery_mechanisms: this.discoveryMechanisms, + xds_lb_policy: this.xdsLbPolicy + } + } + } + + constructor(private discoveryMechanisms: DiscoveryMechanism[], private xdsLbPolicy: LoadBalancingConfig[]) {} + + getDiscoveryMechanisms() { + return this.discoveryMechanisms; + } + + getXdsLbPolicy() { + return this.xdsLbPolicy; + } + + static createFromJson(obj: any): XdsClusterResolverLoadBalancingConfig { + if (!('discovery_mechanisms' in obj && Array.isArray(obj.discovery_mechanisms))) { + throw new Error('xds_cluster_resolver config must have a discovery_mechanisms array'); + } + if (!('xds_lb_policy' in obj && Array.isArray(obj.xds_lb_policy))) { + throw new Error('xds_cluster_resolver config must have a xds_lb_policy array'); + } + return new XdsClusterResolverLoadBalancingConfig( + obj.discovery_mechanisms.map(validateDiscoveryMechanism), + obj.xds_lb_policy + ); + } +} + +interface WeightedEndpoint { + endpoint: Endpoint; + weight: number; +} + +interface LocalityEntry { + locality: Locality__Output; + weight: number; + endpoints: WeightedEndpoint[]; +} + +interface PriorityEntry { + localities: LocalityEntry[]; + dropCategories: DropCategory[]; +} + +interface DiscoveryMechanismEntry { + discoveryMechanism: DiscoveryMechanism; + localityPriorities: Map; + priorityNames: string[]; + nextPriorityChildNumber: number; + watcher?: Watcher; + resolver?: Resolver; + latestUpdate?: PriorityEntry[]; +} + +function getEdsPriorities(edsUpdate: ClusterLoadAssignment__Output): PriorityEntry[] { + const result: PriorityEntry[] = []; + const dropCategories: DropCategory[] = []; + if (edsUpdate.policy) { + for (const dropOverload of edsUpdate.policy.drop_overloads) { + if (!dropOverload.drop_percentage) { + continue; + } + let requestsPerMillion: number; + switch (dropOverload.drop_percentage.denominator) { + case 'HUNDRED': + requestsPerMillion = dropOverload.drop_percentage.numerator * 10_000; + break; + case 'TEN_THOUSAND': + requestsPerMillion = dropOverload.drop_percentage.numerator * 100; + break; + case 'MILLION': + requestsPerMillion = dropOverload.drop_percentage.numerator; + break; + } + dropCategories.push({ + category: dropOverload.category, + requests_per_million: requestsPerMillion + }); + } + } + for (const endpoint of edsUpdate.endpoints) { + if (!endpoint.load_balancing_weight) { + continue; + } + const endpoints: WeightedEndpoint[] = endpoint.lb_endpoints.filter(lbEndpoint => lbEndpoint.health_status === 'UNKNOWN' || lbEndpoint.health_status === 'HEALTHY').map( + (lbEndpoint) => { + /* The validator in the XdsClient class ensures that each endpoint has + * a socket_address with an IP address and a port_value. */ + const socketAddress = lbEndpoint.endpoint!.address!.socket_address!; + return { + endpoint: { + addresses: [{ + host: socketAddress.address!, + port: socketAddress.port_value!, + }] + }, + weight: lbEndpoint.load_balancing_weight?.value ?? 
1 + }; + } + ); + if (endpoints.length === 0) { + continue; + } + let priorityEntry: PriorityEntry; + if (result[endpoint.priority]) { + priorityEntry = result[endpoint.priority]; + } else { + priorityEntry = { + localities: [], + dropCategories: dropCategories + }; + result[endpoint.priority] = priorityEntry; + } + priorityEntry.localities.push({ + locality: endpoint.locality!, + endpoints: endpoints, + weight: endpoint.load_balancing_weight.value + }); + } + // Collapse spaces in sparse array + return result.filter(priority => priority); +} + +function getDnsPriorities(endpoints: Endpoint[]): PriorityEntry[] { + return [{ + localities: [{ + locality: { + region: '', + zone: '', + sub_zone: '' + }, + weight: 1, + endpoints: endpoints.map(endpoint => ({endpoint: endpoint, weight: 1})) + }], + dropCategories: [] + }]; +} + +export function localityToName(locality: Locality__Output) { + return `{region=${locality.region},zone=${locality.zone},sub_zone=${locality.sub_zone}}`; +} + +function getNextPriorityName(entry: DiscoveryMechanismEntry): string { + return `cluster=${entry.discoveryMechanism.cluster}, child_number=${entry.nextPriorityChildNumber++}`; +} + +export class XdsClusterResolver implements LoadBalancer { + private discoveryMechanismList: DiscoveryMechanismEntry[] = []; + private latestConfig: XdsClusterResolverLoadBalancingConfig | null = null; + private latestAttributes: { [key: string]: unknown; } = {}; + private xdsClient: XdsClient | null = null; + private childBalancer: ChildLoadBalancerHandler; + + constructor(private readonly channelControlHelper: ChannelControlHelper, options: ChannelOptions) { + this.childBalancer = new ChildLoadBalancerHandler(experimental.createChildChannelControlHelper(channelControlHelper, { + requestReresolution: () => { + for (const entry of this.discoveryMechanismList) { + entry.resolver?.updateResolution(); + } + } + }), options); + } + + private maybeUpdateChild() { + if (!this.latestConfig) { + return; + } + for (const entry of this.discoveryMechanismList) { + if (!entry.latestUpdate) { + return; + } + } + const fullPriorityList: string[] = []; + const priorityChildren: {[name: string]: PriorityChildRaw} = {}; + const endpointList: LocalityEndpoint[] = []; + const edsChildPolicy = this.latestConfig.getXdsLbPolicy(); + for (const entry of this.discoveryMechanismList) { + const newPriorityNames: string[] = []; + const newLocalityPriorities = new Map(); + const xdsClusterImplChildPolicy: LoadBalancingConfig[] = entry.discoveryMechanism.type === 'EDS' ? edsChildPolicy : [{ pick_first: {} }]; + + for (const [priority, priorityEntry] of entry.latestUpdate!.entries()) { + /** + * Highest (smallest number) priority value that any of the localities in + * this locality array had a in the previous mapping. + */ + let highestOldPriority = Infinity; + for (const localityObj of priorityEntry.localities) { + const oldPriority = entry.localityPriorities.get( + localityToName(localityObj.locality) + ); + if ( + oldPriority !== undefined && + oldPriority >= priority && + oldPriority < highestOldPriority + ) { + highestOldPriority = oldPriority; + } + } + let newPriorityName: string; + if (highestOldPriority === Infinity) { + /* No existing priority at or below the same number as the priority we + * are looking at had any of the localities in this priority. So, we + * use a new name. 
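+           * (For example, if a locality that is now at priority 0 was
+           * previously at priority 1, the else branch below reuses the name
+           * that was assigned to priority 1, so the priority policy keeps the
+           * existing child for that locality instead of creating a new one.)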
*/ + newPriorityName = getNextPriorityName(entry); + } else { + const newName = entry.priorityNames[highestOldPriority]; + if (newPriorityNames.indexOf(newName) < 0) { + newPriorityName = newName; + } else { + newPriorityName = getNextPriorityName(entry); + } + } + newPriorityNames[priority] = newPriorityName; + + for (const localityObj of priorityEntry.localities) { + for (const weightedEndpoint of localityObj.endpoints) { + endpointList.push({ + localityPath: [ + newPriorityName, + localityToName(localityObj.locality), + ], + locality: localityObj.locality, + localityWeight: localityObj.weight, + endpointWeight: localityObj.weight * weightedEndpoint.weight, + ...weightedEndpoint.endpoint + }); + } + newLocalityPriorities.set(localityToName(localityObj.locality), priority); + } + const xdsClusterImplConfig = { + xds_cluster_impl: { + cluster: entry.discoveryMechanism.cluster, + drop_categories: priorityEntry.dropCategories, + max_concurrent_requests: entry.discoveryMechanism.max_concurrent_requests, + eds_service_name: entry.discoveryMechanism.eds_service_name ?? '', + lrs_load_reporting_server: entry.discoveryMechanism.lrs_load_reporting_server, + child_policy: xdsClusterImplChildPolicy + } + } + let priorityChildConfig: LoadBalancingConfig; + if (EXPERIMENTAL_OUTLIER_DETECTION) { + priorityChildConfig = { + outlier_detection: { + ...entry.discoveryMechanism.outlier_detection, + child_policy: [xdsClusterImplConfig] + } + } + } else { + priorityChildConfig = xdsClusterImplConfig; + } + + priorityChildren[newPriorityName] = { + config: [priorityChildConfig], + ignore_reresolution_requests: entry.discoveryMechanism.type === 'EDS' + }; + } + entry.localityPriorities = newLocalityPriorities; + entry.priorityNames = newPriorityNames; + fullPriorityList.push(...newPriorityNames); + } + const childConfig = { + priority: { + children: priorityChildren, + priorities: fullPriorityList + } + } + let typedChildConfig: TypedLoadBalancingConfig; + try { + typedChildConfig = parseLoadBalancingConfig(childConfig); + } catch (e) { + trace('LB policy config parsing failed with error ' + (e as Error).message); + this.channelControlHelper.updateState(connectivityState.TRANSIENT_FAILURE, new UnavailablePicker({code: status.UNAVAILABLE, details: `LB policy config parsing failed with error ${(e as Error).message}`, metadata: new Metadata()})); + return; + } + trace('Child update addresses: ' + endpointList.map(endpoint => '(' + endpointToString(endpoint) + ' path=' + endpoint.localityPath + ')')); + trace('Child update priority config: ' + JSON.stringify(childConfig, undefined, 2)); + this.childBalancer.updateAddressList( + endpointList, + typedChildConfig, + this.latestAttributes + ); + } + + updateAddressList(addressList: Endpoint[], lbConfig: TypedLoadBalancingConfig, attributes: { [key: string]: unknown; }): void { + if (!(lbConfig instanceof XdsClusterResolverLoadBalancingConfig)) { + trace('Discarding address list update with unrecognized config ' + JSON.stringify(lbConfig, undefined, 2)); + return; + } + trace('Received update with config ' + JSON.stringify(lbConfig, undefined, 2)); + this.latestConfig = lbConfig; + this.latestAttributes = attributes; + this.xdsClient = attributes.xdsClient as XdsClient; + if (this.discoveryMechanismList.length === 0) { + for (const mechanism of lbConfig.getDiscoveryMechanisms()) { + const mechanismEntry: DiscoveryMechanismEntry = { + discoveryMechanism: mechanism, + localityPriorities: new Map(), + priorityNames: [], + nextPriorityChildNumber: 0 + }; + if 
(mechanism.type === 'EDS') { + const edsServiceName = mechanism.eds_service_name ?? mechanism.cluster; + const watcher: Watcher = new Watcher({ + onResourceChanged: update => { + mechanismEntry.latestUpdate = getEdsPriorities(update); + this.maybeUpdateChild(); + }, + onResourceDoesNotExist: () => { + trace('Resource does not exist: ' + edsServiceName); + mechanismEntry.latestUpdate = [{localities: [], dropCategories: []}]; + }, + onError: error => { + if (!mechanismEntry.latestUpdate) { + trace('xDS request failed with error ' + error); + mechanismEntry.latestUpdate = [{localities: [], dropCategories: []}]; + } + } + }); + mechanismEntry.watcher = watcher; + if (this.xdsClient) { + EndpointResourceType.startWatch(this.xdsClient, edsServiceName, watcher); + } + } else { + const resolver = createResolver({scheme: 'dns', path: mechanism.dns_hostname!}, { + onSuccessfulResolution: endpointList => { + mechanismEntry.latestUpdate = getDnsPriorities(endpointList); + this.maybeUpdateChild(); + }, + onError: error => { + if (!mechanismEntry.latestUpdate) { + trace('DNS resolution for ' + mechanism.dns_hostname + ' failed with error ' + error); + mechanismEntry.latestUpdate = [{localities: [], dropCategories: []}]; + } + } + }, {'grpc.service_config_disable_resolution': 1}); + mechanismEntry.resolver = resolver; + resolver.updateResolution(); + } + this.discoveryMechanismList.push(mechanismEntry); + } + } else { + /* The ChildLoadBalancerHandler subclass guarantees that each discovery + * mechanism in the new update corresponds to the same entry in the + * existing discoveryMechanismList, and that any differences will not + * result in changes to the watcher/resolver. */ + for (let i = 0; i < this.discoveryMechanismList.length; i++) { + this.discoveryMechanismList[i].discoveryMechanism = lbConfig.getDiscoveryMechanisms()[i]; + } + this.maybeUpdateChild(); + } + } + exitIdle(): void { + this.childBalancer.exitIdle(); + } + resetBackoff(): void { + this.childBalancer.resetBackoff(); + } + destroy(): void { + for (const mechanismEntry of this.discoveryMechanismList) { + if (mechanismEntry.watcher) { + const edsServiceName = mechanismEntry.discoveryMechanism.eds_service_name ?? 
mechanismEntry.discoveryMechanism.cluster; + if (this.xdsClient) { + EndpointResourceType.cancelWatch(this.xdsClient, edsServiceName, mechanismEntry.watcher); + } + } + mechanismEntry.resolver?.destroy(); + } + this.discoveryMechanismList = []; + this.childBalancer.destroy(); + } + getTypeName(): string { + return TYPE_NAME; + } +} + +function maybeServerConfigEqual(config1: XdsServerConfig | undefined, config2: XdsServerConfig | undefined) { + if (config1 !== undefined && config2 !== undefined) { + return serverConfigEqual(config1, config2); + } else { + return config1 === config2; + } +} + +export class XdsClusterResolverChildPolicyHandler extends ChildLoadBalancerHandler { + protected configUpdateRequiresNewPolicyInstance(oldConfig: TypedLoadBalancingConfig, newConfig: TypedLoadBalancingConfig): boolean { + if (!(oldConfig instanceof XdsClusterResolverLoadBalancingConfig && newConfig instanceof XdsClusterResolverLoadBalancingConfig)) { + return super.configUpdateRequiresNewPolicyInstance(oldConfig, newConfig); + } + if (oldConfig.getDiscoveryMechanisms().length !== newConfig.getDiscoveryMechanisms().length) { + return true; + } + for (let i = 0; i < oldConfig.getDiscoveryMechanisms().length; i++) { + const oldDiscoveryMechanism = oldConfig.getDiscoveryMechanisms()[i]; + const newDiscoveryMechanism = newConfig.getDiscoveryMechanisms()[i]; + if (oldDiscoveryMechanism.type !== newDiscoveryMechanism.type || + oldDiscoveryMechanism.cluster !== newDiscoveryMechanism.cluster || + oldDiscoveryMechanism.eds_service_name !== newDiscoveryMechanism.eds_service_name || + oldDiscoveryMechanism.dns_hostname !== newDiscoveryMechanism.dns_hostname || + !maybeServerConfigEqual(oldDiscoveryMechanism.lrs_load_reporting_server, newDiscoveryMechanism.lrs_load_reporting_server)) { + return true; + } + } + return false; + } +} + +export function setup() { + registerLoadBalancerType(TYPE_NAME, XdsClusterResolver, XdsClusterResolverLoadBalancingConfig); +} diff --git a/packages/grpc-js-xds/src/load-balancer-xds-wrr-locality.ts b/packages/grpc-js-xds/src/load-balancer-xds-wrr-locality.ts new file mode 100644 index 000000000..3fb57d6e2 --- /dev/null +++ b/packages/grpc-js-xds/src/load-balancer-xds-wrr-locality.ts @@ -0,0 +1,168 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +// https://github.com/grpc/proposal/blob/master/A52-xds-custom-lb-policies.md + +import { ChannelOptions, LoadBalancingConfig, experimental, logVerbosity } from "@grpc/grpc-js"; +import { loadProtosWithOptionsSync } from "@grpc/proto-loader/build/src/util"; +import { WeightedTargetRaw } from "./load-balancer-weighted-target"; +import { isLocalityEndpoint } from "./load-balancer-priority"; +import { localityToName } from "./load-balancer-xds-cluster-resolver"; +import TypedLoadBalancingConfig = experimental.TypedLoadBalancingConfig; +import LoadBalancer = experimental.LoadBalancer; +import ChannelControlHelper = experimental.ChannelControlHelper; +import ChildLoadBalancerHandler = experimental.ChildLoadBalancerHandler; +import Endpoint = experimental.Endpoint; +import parseLoadBalancingConfig = experimental.parseLoadBalancingConfig; +import registerLoadBalancerType = experimental.registerLoadBalancerType; +import { Any__Output } from "./generated/google/protobuf/Any"; +import { WrrLocality__Output } from "./generated/envoy/extensions/load_balancing_policies/wrr_locality/v3/WrrLocality"; +import { TypedExtensionConfig__Output } from "./generated/envoy/config/core/v3/TypedExtensionConfig"; +import { LoadBalancingPolicy__Output } from "./generated/envoy/config/cluster/v3/LoadBalancingPolicy"; +import { registerLbPolicy } from "./lb-policy-registry"; + +const TRACER_NAME = 'xds_wrr_locality'; + +function trace(text: string): void { + experimental.trace(logVerbosity.DEBUG, TRACER_NAME, text); +} + +const TYPE_NAME = 'xds_wrr_locality'; + +class XdsWrrLocalityLoadBalancingConfig implements TypedLoadBalancingConfig { + getLoadBalancerName(): string { + return TYPE_NAME; + } + toJsonObject(): object { + return { + [TYPE_NAME]: { + child_policy: this.childPolicy + } + } + } + + constructor(private childPolicy: LoadBalancingConfig[]) {} + + getChildPolicy() { + return this.childPolicy; + } + + static createFromJson(obj: any): XdsWrrLocalityLoadBalancingConfig { + if (!('child_policy' in obj && Array.isArray(obj.child_policy))) { + throw new Error('xds_wrr_locality config must have a child_policy array'); + } + return new XdsWrrLocalityLoadBalancingConfig( + obj.child_policy + ); + } +} + +class XdsWrrLocalityLoadBalancer implements LoadBalancer { + private childBalancer: ChildLoadBalancerHandler; + constructor(private readonly channelControlHelper: ChannelControlHelper, options: ChannelOptions) { + this.childBalancer = new ChildLoadBalancerHandler(channelControlHelper, options); + } + updateAddressList(endpointList: Endpoint[], lbConfig: TypedLoadBalancingConfig, attributes: { [key: string]: unknown; }): void { + if (!(lbConfig instanceof XdsWrrLocalityLoadBalancingConfig)) { + trace('Discarding address list update with unrecognized config ' + JSON.stringify(lbConfig, undefined, 2)); + return; + } + const targets: {[localityName: string]: WeightedTargetRaw} = {}; + for (const address of endpointList) { + if (!isLocalityEndpoint(address)) { + return; + } + const localityName = localityToName(address.locality); + if (!(localityName in targets)) { + targets[localityName] = { + child_policy: lbConfig.getChildPolicy(), + weight: address.localityWeight + }; + } + } + const childConfig = { + weighted_target: { + targets: targets + } + }; + this.childBalancer.updateAddressList(endpointList, parseLoadBalancingConfig(childConfig), attributes); + } + exitIdle(): void { + this.childBalancer.exitIdle(); + } + resetBackoff(): void { + this.childBalancer.resetBackoff(); + } + destroy(): void { + 
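To make the conversion in updateAddressList above concrete, here is a rough sketch of the weighted_target config it produces for two invented localities with weights 3 and 1, assuming the configured child policy is round_robin; the target keys follow localityToName's format.

const exampleWrrLocalityChildConfig = {
  weighted_target: {
    targets: {
      '{region=us-east1,zone=us-east1-b,sub_zone=}': {
        child_policy: [{ round_robin: {} }],
        weight: 3
      },
      '{region=us-east1,zone=us-east1-c,sub_zone=}': {
        child_policy: [{ round_robin: {} }],
        weight: 1
      }
    }
  }
};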
this.childBalancer.destroy(); + } + getTypeName(): string { + return TYPE_NAME; + } +} + +const WRR_LOCALITY_TYPE_URL = 'type.googleapis.com/envoy.extensions.load_balancing_policies.wrr_locality.v3.WrrLocality'; + +const resourceRoot = loadProtosWithOptionsSync([ + 'envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.proto'], { + keepCase: true, + includeDirs: [ + // Paths are relative to src/build + __dirname + '/../../deps/envoy-api/', + __dirname + '/../../deps/xds/', + __dirname + '/../../deps/protoc-gen-validate' + ], + } +); + +const toObjectOptions = { + longs: String, + enums: String, + defaults: true, + oneofs: true +} + +function decodeWrrLocality(message: Any__Output): WrrLocality__Output { + const name = message.type_url.substring(message.type_url.lastIndexOf('/') + 1); + const type = resourceRoot.lookup(name); + if (type) { + const decodedMessage = (type as any).decode(message.value); + return decodedMessage.$type.toObject(decodedMessage, toObjectOptions) as WrrLocality__Output; + } else { + throw new Error(`TypedStruct parsing error: unexpected type URL ${message.type_url}`); + } +} + +function convertToLoadBalancingPolicy(protoPolicy: TypedExtensionConfig__Output, selectChildPolicy: (childPolicy: LoadBalancingPolicy__Output) => LoadBalancingConfig): LoadBalancingConfig { + if (protoPolicy.typed_config?.type_url !== WRR_LOCALITY_TYPE_URL) { + throw new Error(`WRR Locality LB policy parsing error: unexpected type URL ${protoPolicy.typed_config?.type_url}`); + } + const wrrLocalityMessage = decodeWrrLocality(protoPolicy.typed_config); + if (!wrrLocalityMessage.endpoint_picking_policy) { + throw new Error('WRR Locality LB parsing error: no endpoint_picking_policy specified'); + } + return { + [TYPE_NAME]: { + child_policy: [selectChildPolicy(wrrLocalityMessage.endpoint_picking_policy)] + } + }; +} + +export function setup() { + registerLoadBalancerType(TYPE_NAME, XdsWrrLocalityLoadBalancer, XdsWrrLocalityLoadBalancingConfig); + registerLbPolicy(WRR_LOCALITY_TYPE_URL, convertToLoadBalancingPolicy); +} diff --git a/packages/grpc-js-xds/src/matcher.ts b/packages/grpc-js-xds/src/matcher.ts index 14cb7f672..b657d32c9 100644 --- a/packages/grpc-js-xds/src/matcher.ts +++ b/packages/grpc-js-xds/src/matcher.ts @@ -16,6 +16,7 @@ import { Metadata } from "@grpc/grpc-js"; import { RE2 } from "re2-wasm"; +import { Fraction, fractionToString } from "./fraction"; /** * An object representing a predicate that determines whether a given @@ -36,14 +37,19 @@ export interface ValueMatcher { } export class ExactValueMatcher implements ValueMatcher { - constructor(private targetValue: string) {} + constructor(private targetValue: string, private ignoreCase: boolean) { + } apply(value: string) { - return value === this.targetValue; + if (this.ignoreCase) { + return value.toLowerCase() === this.targetValue.toLowerCase(); + } else { + return value === this.targetValue; + } } toString() { - return 'Exact(' + this.targetValue + ')'; + return 'Exact(' + this.targetValue + ', ignore_case=' + this.ignoreCase + ')'; } } @@ -65,7 +71,7 @@ export class SafeRegexValueMatcher implements ValueMatcher { const numberRegex = new RE2(/^-?\d+$/u); export class RangeValueMatcher implements ValueMatcher { - constructor(private start: BigInt, private end: BigInt) {} + constructor(private start: bigint, private end: bigint) {} apply(value: string) { if (!numberRegex.test(value)) { @@ -93,26 +99,51 @@ export class PresentValueMatcher implements ValueMatcher { } export class PrefixValueMatcher implements 
ValueMatcher { - constructor(private prefix: string) {} + constructor(private prefix: string, private ignoreCase: boolean) { + } apply(value: string) { - return value.startsWith(this.prefix); + if (this.ignoreCase) { + return value.toLowerCase().startsWith(this.prefix.toLowerCase()); + } else { + return value.startsWith(this.prefix); + } } toString() { - return 'Prefix(' + this.prefix + ')'; + return 'Prefix(' + this.prefix + ', ignore_case=' + this.ignoreCase + ')'; } } export class SuffixValueMatcher implements ValueMatcher { - constructor(private suffix: string) {} + constructor(private suffix: string, private ignoreCase: boolean) {} + + apply(value: string) { + if (this.ignoreCase) { + return value.toLowerCase().endsWith(this.suffix.toLowerCase()); + } else { + return value.endsWith(this.suffix); + } + } + + toString() { + return 'Suffix(' + this.suffix + ', ignore_case=' + this.ignoreCase + ')'; + } +} + +export class ContainsValueMatcher implements ValueMatcher { + constructor(private contains: string, private ignoreCase: boolean) {} apply(value: string) { - return value.endsWith(this.suffix); + if (this.ignoreCase) { + return value.toLowerCase().includes(this.contains.toLowerCase()); + } else { + return value.includes(this.contains); + } } toString() { - return 'Suffix(' + this.suffix + ')'; + return 'Contains(' + this.contains + + ', ignore_case=' + this.ignoreCase + ')'; } } @@ -210,15 +241,6 @@ export class PathSafeRegexValueMatcher { } } -export interface Fraction { - numerator: number; - denominator: number; -} - -function fractionToString(fraction: Fraction): string { - return `${fraction.numerator}/${fraction.denominator}`; -} - export class FullMatcher implements Matcher { constructor(private pathMatcher: ValueMatcher, private headerMatchers: Matcher[], private fraction: Fraction | null) {} @@ -242,4 +264,4 @@ export class FullMatcher implements Matcher { headers: ${this.headerMatchers.map(matcher => matcher.toString()).join('\n\t')} fraction: ${this.fraction ? fractionToString(this.fraction): 'none'}`; } -} \ No newline at end of file +} diff --git a/packages/grpc-js-xds/src/protobuf-any.ts b/packages/grpc-js-xds/src/protobuf-any.ts new file mode 100644 index 000000000..cfee35f91 --- /dev/null +++ b/packages/grpc-js-xds/src/protobuf-any.ts @@ -0,0 +1,23 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// This is a non-public, unstable API, but it's very convenient +import { loadProtosWithOptionsSync } from '@grpc/proto-loader/build/src/util'; +import { Any__Output } from './generated/google/protobuf/Any'; + +function parseAnyMessage(encodedMessage: Any__Output) { + +} \ No newline at end of file diff --git a/packages/grpc-js-xds/src/resolver-xds.ts b/packages/grpc-js-xds/src/resolver-xds.ts index aee7290bb..b02764a79b 100644 --- a/packages/grpc-js-xds/src/resolver-xds.ts +++ b/packages/grpc-js-xds/src/resolver-xds.ts @@ -18,28 +18,35 @@ import * as protoLoader from '@grpc/proto-loader'; import { RE2 } from 're2-wasm'; -import { getSingletonXdsClient, XdsClient } from './xds-client'; -import { StatusObject, status, logVerbosity, Metadata, experimental, ChannelOptions } from '@grpc/grpc-js'; +import { getSingletonXdsClient, Watcher, XdsClient } from './xds-client'; +import { StatusObject, status, logVerbosity, Metadata, experimental, ChannelOptions, ServiceConfig, LoadBalancingConfig, RetryPolicy } from '@grpc/grpc-js'; import Resolver = experimental.Resolver; import GrpcUri = experimental.GrpcUri; import ResolverListener = experimental.ResolverListener; import uriToString = experimental.uriToString; -import ServiceConfig = experimental.ServiceConfig; import registerResolver = experimental.registerResolver; -import { Listener__Output } from './generated/envoy/api/v2/Listener'; -import { Watcher } from './xds-stream-state/xds-stream-state'; -import { RouteConfiguration__Output } from './generated/envoy/api/v2/RouteConfiguration'; -import { HttpConnectionManager__Output } from './generated/envoy/config/filter/network/http_connection_manager/v2/HttpConnectionManager'; -import { CdsLoadBalancingConfig } from './load-balancer-cds'; -import { VirtualHost__Output } from './generated/envoy/api/v2/route/VirtualHost'; -import { RouteMatch__Output } from './generated/envoy/api/v2/route/RouteMatch'; -import { HeaderMatcher__Output } from './generated/envoy/api/v2/route/HeaderMatcher'; +import { Listener__Output } from './generated/envoy/config/listener/v3/Listener'; +import { RouteConfiguration__Output } from './generated/envoy/config/route/v3/RouteConfiguration'; +import { HttpConnectionManager__Output } from './generated/envoy/extensions/filters/network/http_connection_manager/v3/HttpConnectionManager'; +import { VirtualHost__Output } from './generated/envoy/config/route/v3/VirtualHost'; +import { RouteMatch__Output } from './generated/envoy/config/route/v3/RouteMatch'; +import { HeaderMatcher__Output } from './generated/envoy/config/route/v3/HeaderMatcher'; import ConfigSelector = experimental.ConfigSelector; -import LoadBalancingConfig = experimental.LoadBalancingConfig; -import { XdsClusterManagerLoadBalancingConfig } from './load-balancer-xds-cluster-manager'; -import { ExactValueMatcher, Fraction, FullMatcher, HeaderMatcher, Matcher, PathExactValueMatcher, PathPrefixValueMatcher, PathSafeRegexValueMatcher, PrefixValueMatcher, PresentValueMatcher, RangeValueMatcher, RejectValueMatcher, SafeRegexValueMatcher, SuffixValueMatcher, ValueMatcher } from './matcher'; -import { RouteAction, SingleClusterRouteAction, WeightedCluster, WeightedClusterRouteAction } from './route-action'; -import { LogVerbosity } from '@grpc/grpc-js/build/src/constants'; +import { ContainsValueMatcher, ExactValueMatcher, FullMatcher, HeaderMatcher, Matcher, PathExactValueMatcher, PathPrefixValueMatcher, PathSafeRegexValueMatcher, PrefixValueMatcher, PresentValueMatcher, RangeValueMatcher, RejectValueMatcher, 
SafeRegexValueMatcher, SuffixValueMatcher, ValueMatcher } from './matcher'; +import { envoyFractionToFraction, Fraction } from "./fraction"; +import { HashPolicy, RouteAction, SingleClusterRouteAction, WeightedCluster, WeightedClusterRouteAction } from './route-action'; +import { decodeSingleResource, HTTP_CONNECTION_MANGER_TYPE_URL } from './resources'; +import Duration = experimental.Duration; +import { Duration__Output } from './generated/google/protobuf/Duration'; +import { createHttpFilter, HttpFilterConfig, parseOverrideFilterConfig, parseTopLevelFilterConfig } from './http-filter'; +import { EXPERIMENTAL_FAULT_INJECTION, EXPERIMENTAL_FEDERATION, EXPERIMENTAL_RETRY, EXPERIMENTAL_RING_HASH } from './environment'; +import Filter = experimental.Filter; +import FilterFactory = experimental.FilterFactory; +import { BootstrapInfo, loadBootstrapInfo, validateBootstrapConfig } from './xds-bootstrap'; +import { ListenerResourceType } from './xds-resource-type/listener-resource-type'; +import { RouteConfigurationResourceType } from './xds-resource-type/route-config-resource-type'; +import { protoDurationToDuration } from './duration'; +import { loadXxhashApi } from './xxhash'; const TRACER_NAME = 'xds_resolver'; @@ -127,7 +134,7 @@ function getPredicateForHeaderMatcher(headerMatch: HeaderMatcher__Output): Match let valueChecker: ValueMatcher; switch (headerMatch.header_match_specifier) { case 'exact_match': - valueChecker = new ExactValueMatcher(headerMatch.exact_match!); + valueChecker = new ExactValueMatcher(headerMatch.exact_match!, false); break; case 'safe_regex_match': valueChecker = new SafeRegexValueMatcher(headerMatch.safe_regex_match!.regex); @@ -141,10 +148,30 @@ function getPredicateForHeaderMatcher(headerMatch: HeaderMatcher__Output): Match valueChecker = new PresentValueMatcher(); break; case 'prefix_match': - valueChecker = new PrefixValueMatcher(headerMatch.prefix_match!); + valueChecker = new PrefixValueMatcher(headerMatch.prefix_match!, false); break; case 'suffix_match': - valueChecker = new SuffixValueMatcher(headerMatch.suffix_match!); + valueChecker = new SuffixValueMatcher(headerMatch.suffix_match!, false); + break; + case 'string_match': + const stringMatch = headerMatch.string_match! 
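A small usage sketch of the case-insensitivity handling added above; the matcher inputs are arbitrary examples.

// Illustrative behavior of the ignore_case-aware matchers defined earlier.
const containsMatcher = new ContainsValueMatcher('token', true);
containsMatcher.apply('my-TOKEN-value');   // true: both sides are lowercased before comparison
const prefixMatcher = new PrefixValueMatcher('application/', false);
prefixMatcher.apply('Application/grpc');   // false: ignore_case is disabled here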
+ switch (stringMatch.match_pattern) { + case 'exact': + valueChecker = new ExactValueMatcher(stringMatch.exact!, stringMatch.ignore_case); + break; + case 'safe_regex': + valueChecker = new SafeRegexValueMatcher(stringMatch.safe_regex!.regex); + break; + case 'prefix': + valueChecker = new PrefixValueMatcher(stringMatch.prefix!, stringMatch.ignore_case); + break; + case 'suffix': + valueChecker = new SuffixValueMatcher(stringMatch.suffix!, stringMatch.ignore_case); + break; + case 'contains': + valueChecker = new ContainsValueMatcher(stringMatch.contains!, stringMatch.ignore_case); + break; + } break; default: valueChecker = new RejectValueMatcher(); @@ -152,12 +179,6 @@ function getPredicateForHeaderMatcher(headerMatch: HeaderMatcher__Output): Match return new HeaderMatcher(headerMatch.name, valueChecker, headerMatch.invert_match); } -const RUNTIME_FRACTION_DENOMINATOR_VALUES = { - HUNDRED: 100, - TEN_THOUSAND: 10_000, - MILLION: 1_000_000 -} - function getPredicateForMatcher(routeMatch: RouteMatch__Output): Matcher { let pathMatcher: ValueMatcher; const caseInsensitive = routeMatch.case_sensitive?.value === false; @@ -179,20 +200,67 @@ function getPredicateForMatcher(routeMatch: RouteMatch__Output): Matcher { if (!routeMatch.runtime_fraction?.default_value) { runtimeFraction = null; } else { - runtimeFraction = { - numerator: routeMatch.runtime_fraction.default_value.numerator, - denominator: RUNTIME_FRACTION_DENOMINATOR_VALUES[routeMatch.runtime_fraction.default_value.denominator] - }; + runtimeFraction = envoyFractionToFraction(routeMatch.runtime_fraction.default_value) } return new FullMatcher(pathMatcher, headerMatchers, runtimeFraction); } +function protoDurationToSecondsString(duration: Duration__Output): string { + return `${duration.seconds + duration.nanos / 1_000_000_000}s`; +} + +const DEFAULT_RETRY_BASE_INTERVAL = '0.025s' + +function getDefaultRetryMaxInterval(baseInterval: string): string { + return `${Number.parseFloat(baseInterval.substring(0, baseInterval.length - 1)) * 10}s`; +} + +/** + * Encode a text string as a valid path of a URI, as specified in RFC-3986 section 3.3 + * @param uriPath A value representing an unencoded URI path + * @returns + */ +function encodeURIPath(uriPath: string): string { + return uriPath.replace(/[^A-Za-z0-9._~!$&^()*+,;=/-]/g, substring => encodeURIComponent(substring)); +} + +function formatTemplateString(templateString: string, value: string): string { + if (templateString.startsWith('xdstp:')) { + return templateString.replace(/%s/g, encodeURIPath(value)); + } else { + return templateString.replace(/%s/g, value); + } +} + +export function getListenerResourceName(bootstrapConfig: BootstrapInfo, target: GrpcUri): string { + if (target.authority && target.authority !== '') { + if (target.authority in bootstrapConfig.authorities) { + return formatTemplateString(bootstrapConfig.authorities[target.authority].clientListenerResourceNameTemplate, target.path); + } else { + throw new Error(`Authority ${target.authority} not found in bootstrap file`); + } + } else { + return formatTemplateString(bootstrapConfig.clientDefaultListenerResourceNameTemplate, target.path); + } +} + +const BOOTSTRAP_CONFIG_KEY = 'grpc.TEST_ONLY_DO_NOT_USE_IN_PROD.xds_bootstrap_config'; + +const RETRY_CODES: {[key: string]: status} = { + 'cancelled': status.CANCELLED, + 'deadline-exceeded': status.DEADLINE_EXCEEDED, + 'internal': status.INTERNAL, + 'resource-exhausted': status.RESOURCE_EXHAUSTED, + 'unavailable': status.UNAVAILABLE +}; + class XdsResolver implements 
Resolver { private hasReportedSuccess = false; + private ldsWatcher: Watcher<Listener__Output>; private rdsWatcher: Watcher<RouteConfiguration__Output> private isLdsWatcherActive = false; + private listenerResourceName: string | null = null; /** * The latest route config name from an LDS response. The RDS watcher is * actively watching that name if and only if this is not null. @@ -203,31 +271,60 @@ class XdsResolver implements Resolver { private clusterRefcounts = new Map(); + private latestDefaultTimeout: Duration | undefined = undefined; + + private ldsHttpFilterConfigs: {name: string, config: HttpFilterConfig}[] = []; + + private bootstrapInfo: BootstrapInfo | null = null; + + private xdsClient: XdsClient; + constructor( private target: GrpcUri, private listener: ResolverListener, private channelOptions: ChannelOptions ) { - this.ldsWatcher = { - onValidUpdate: (update: Listener__Output) => { - const httpConnectionManager = update.api_listener! - .api_listener as protoLoader.AnyExtension & - HttpConnectionManager__Output; + if (channelOptions[BOOTSTRAP_CONFIG_KEY]) { + const parsedConfig = JSON.parse(channelOptions[BOOTSTRAP_CONFIG_KEY]); + this.bootstrapInfo = validateBootstrapConfig(parsedConfig); + this.xdsClient = new XdsClient(this.bootstrapInfo); + } else { + this.xdsClient = getSingletonXdsClient(); + } + this.ldsWatcher = new Watcher({ + onResourceChanged: (update: Listener__Output) => { + const httpConnectionManager = decodeSingleResource(HTTP_CONNECTION_MANGER_TYPE_URL, update.api_listener!.api_listener!.value); + const defaultTimeout = httpConnectionManager.common_http_protocol_options?.idle_timeout; + if (defaultTimeout === null || defaultTimeout === undefined) { + this.latestDefaultTimeout = undefined; + } else { + this.latestDefaultTimeout = protoDurationToDuration(defaultTimeout); + } + if (EXPERIMENTAL_FAULT_INJECTION) { + this.ldsHttpFilterConfigs = []; + for (const filter of httpConnectionManager.http_filters) { + // typed_config must be set here, or validation would have failed + const filterConfig = parseTopLevelFilterConfig(filter.typed_config!); + if (filterConfig) { + this.ldsHttpFilterConfigs.push({name: filter.name, config: filterConfig}); + } + } + } switch (httpConnectionManager.route_specifier) { case 'rds': { const routeConfigName = httpConnectionManager.rds!.route_config_name; if (this.latestRouteConfigName !== routeConfigName) { if (this.latestRouteConfigName !== null) { - getSingletonXdsClient().removeRouteWatcher(this.latestRouteConfigName, this.rdsWatcher); + RouteConfigurationResourceType.cancelWatch(this.xdsClient, this.latestRouteConfigName, this.rdsWatcher); } - getSingletonXdsClient().addRouteWatcher(httpConnectionManager.rds!.route_config_name, this.rdsWatcher); + RouteConfigurationResourceType.startWatch(this.xdsClient, routeConfigName, this.rdsWatcher); this.latestRouteConfigName = routeConfigName; } break; } case 'route_config': if (this.latestRouteConfigName) { - getSingletonXdsClient().removeRouteWatcher(this.latestRouteConfigName, this.rdsWatcher); + RouteConfigurationResourceType.cancelWatch(this.xdsClient, this.latestRouteConfigName, this.rdsWatcher); } this.handleRouteConfig(httpConnectionManager.route_config!); break; @@ -235,7 +332,7 @@ class XdsResolver implements Resolver { // This is prevented by the validation rules } }, - onTransientError: (error: StatusObject) => { + onError: (error: StatusObject) => { /* A transient error only needs to bubble up as a failure if we have * not already provided a ServiceConfig for the upper layer to use */ if (!this.hasReportedSuccess) { @@ 
-247,12 +344,12 @@ class XdsResolver implements Resolver { trace('Resolution error for target ' + uriToString(this.target) + ': LDS resource does not exist'); this.reportResolutionError(`Listener ${this.target} does not exist`); } - }; - this.rdsWatcher = { - onValidUpdate: (update: RouteConfiguration__Output) => { + }); + this.rdsWatcher = new Watcher({ + onResourceChanged: (update: RouteConfiguration__Output) => { this.handleRouteConfig(update); }, - onTransientError: (error: StatusObject) => { + onError: (error: StatusObject) => { /* A transient error only needs to bubble up as a failure if we have * not already provided a ServiceConfig for the upper layer to use */ if (!this.hasReportedSuccess) { @@ -264,7 +361,7 @@ class XdsResolver implements Resolver { trace('Resolution error for target ' + uriToString(this.target) + ' and route config ' + this.latestRouteConfigName + ': RDS resource does not exist'); this.reportResolutionError(`Route config ${this.latestRouteConfigName} does not exist`); } - } + }); } private refCluster(clusterName: string) { @@ -285,35 +382,181 @@ class XdsResolver implements Resolver { } } - private handleRouteConfig(routeConfig: RouteConfiguration__Output) { + private async handleRouteConfig(routeConfig: RouteConfiguration__Output) { + /* We need to load the xxhash API before this function finishes, because + * it is invoked in the config selector, which can be called immediately + * after this function returns. */ + await loadXxhashApi(); this.latestRouteConfig = routeConfig; - const virtualHost = findVirtualHostForDomain(routeConfig.virtual_hosts, this.target.path); + /* Select the virtual host using the default authority override if it + * exists, and the channel target otherwise. */ + const hostDomain = this.channelOptions['grpc.default_authority'] ?? this.target.path; + const virtualHost = findVirtualHostForDomain(routeConfig.virtual_hosts, hostDomain); if (virtualHost === null) { - this.reportResolutionError('No matching route found'); + this.reportResolutionError('No matching route found for ' + hostDomain); return; } + const virtualHostHttpFilterOverrides = new Map(); + if (EXPERIMENTAL_FAULT_INJECTION) { + for (const [name, filter] of Object.entries(virtualHost.typed_per_filter_config ?? {})) { + const parsedConfig = parseOverrideFilterConfig(filter); + if (parsedConfig) { + virtualHostHttpFilterOverrides.set(name, parsedConfig); + } + } + } trace('Received virtual host config ' + JSON.stringify(virtualHost, undefined, 2)); const allConfigClusters = new Set(); const matchList: {matcher: Matcher, action: RouteAction}[] = []; for (const route of virtualHost.routes) { let routeAction: RouteAction; + let timeout: Duration | undefined; + /* For field prioritization see + * https://github.com/grpc/proposal/blob/master/A31-xds-timeout-support-and-config-selector.md#supported-fields + */ + if (route.route?.max_stream_duration?.grpc_timeout_header_max) { + timeout = protoDurationToDuration(route.route.max_stream_duration.grpc_timeout_header_max); + } else if (route.route?.max_stream_duration?.max_stream_duration) { + timeout = protoDurationToDuration(route.route.max_stream_duration.max_stream_duration); + } else { + timeout = this.latestDefaultTimeout; + } + // "A value of 0 indicates the application's deadline is used without modification." 
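// Illustrative timeout selection (invented values): if the route sets
// max_stream_duration = { grpc_timeout_header_max: {seconds: 5, nanos: 0},
// max_stream_duration: {seconds: 30, nanos: 0} }, the prioritization above
// picks the 5 second value; if neither field is set, the HttpConnectionManager
// default captured in latestDefaultTimeout applies, and an explicit 0s value
// falls through to "no override" (undefined), as handled immediately below.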
+ if (timeout?.seconds === 0 && timeout.nanos === 0) { + timeout = undefined; + } + const routeHttpFilterOverrides = new Map(); + if (EXPERIMENTAL_FAULT_INJECTION) { + for (const [name, filter] of Object.entries(route.typed_per_filter_config ?? {})) { + const parsedConfig = parseOverrideFilterConfig(filter); + if (parsedConfig) { + routeHttpFilterOverrides.set(name, parsedConfig); + } + } + } + let retryPolicy: RetryPolicy | undefined = undefined; + if (EXPERIMENTAL_RETRY) { + const retryConfig = route.route!.retry_policy ?? virtualHost.retry_policy; + if (retryConfig) { + const retryableStatusCodes = []; + for (const code of retryConfig.retry_on.split(',')) { + if (RETRY_CODES[code]) { + retryableStatusCodes.push(RETRY_CODES[code]); + } + } + if (retryableStatusCodes.length > 0) { + const baseInterval = retryConfig.retry_back_off?.base_interval ? + protoDurationToSecondsString(retryConfig.retry_back_off.base_interval) : + DEFAULT_RETRY_BASE_INTERVAL; + const maxInterval = retryConfig.retry_back_off?.max_interval ? + protoDurationToSecondsString(retryConfig.retry_back_off.max_interval) : + getDefaultRetryMaxInterval(baseInterval); + retryPolicy = { + backoffMultiplier: 2, + initialBackoff: baseInterval, + maxBackoff: maxInterval, + maxAttempts: (retryConfig.num_retries?.value ?? 1) + 1, + retryableStatusCodes: retryableStatusCodes + }; + } + } + } + const hashPolicies: HashPolicy[] = []; + if (EXPERIMENTAL_RING_HASH) { + for (const routeHashPolicy of route.route!.hash_policy) { + if (routeHashPolicy.policy_specifier === 'header') { + const headerPolicy = routeHashPolicy.header!; + hashPolicies.push({ + type: 'HEADER', + terminal: routeHashPolicy.terminal, + headerName: headerPolicy.header_name, + regex: headerPolicy.regex_rewrite?.pattern ? new RE2(headerPolicy.regex_rewrite.pattern.regex, 'ug') : undefined, + regexSubstitution: headerPolicy.regex_rewrite?.substitution + }); + } else if (routeHashPolicy.policy_specifier === 'filter_state' && routeHashPolicy.filter_state!.key === 'io.grpc.channel_id') { + hashPolicies.push({ + type: 'CHANNEL_ID', + terminal: routeHashPolicy.terminal + }); + } + } + } switch (route.route!.cluster_specifier) { case 'cluster_header': continue; case 'cluster':{ const cluster = route.route!.cluster!; allConfigClusters.add(cluster); - routeAction = new SingleClusterRouteAction(cluster); + const extraFilterFactories: FilterFactory[] = []; + if (EXPERIMENTAL_FAULT_INJECTION) { + for (const filterConfig of this.ldsHttpFilterConfigs) { + if (routeHttpFilterOverrides.has(filterConfig.name)) { + const filter = createHttpFilter(filterConfig.config, routeHttpFilterOverrides.get(filterConfig.name)!); + if (filter) { + extraFilterFactories.push(filter); + } + } else if (virtualHostHttpFilterOverrides.has(filterConfig.name)) { + const filter = createHttpFilter(filterConfig.config, virtualHostHttpFilterOverrides.get(filterConfig.name)!); + if (filter) { + extraFilterFactories.push(filter); + } + } else { + const filter = createHttpFilter(filterConfig.config); + if (filter) { + extraFilterFactories.push(filter); + } + } + } + } + routeAction = new SingleClusterRouteAction(cluster, {name: [], timeout: timeout, retryPolicy: retryPolicy}, extraFilterFactories, hashPolicies); break; } case 'weighted_clusters': { const weightedClusters: WeightedCluster[] = []; for (const clusterWeight of route.route!.weighted_clusters!.clusters) { allConfigClusters.add(clusterWeight.name); - weightedClusters.push({name: clusterWeight.name, weight: clusterWeight.weight?.value ?? 
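As a worked example of the retry conversion above (the xDS values are invented): retry_on: 'unavailable,cancelled,some-unsupported-code' with num_retries: {value: 3} and no retry_back_off yields the following service config retry policy.

const exampleRetryPolicy: RetryPolicy = {
  backoffMultiplier: 2,
  initialBackoff: '0.025s',   // DEFAULT_RETRY_BASE_INTERVAL
  maxBackoff: '0.25s',        // 10x the base interval
  maxAttempts: 4,             // num_retries + 1
  retryableStatusCodes: [status.UNAVAILABLE, status.CANCELLED]  // unrecognized codes are dropped
};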
0}); + const extraFilterFactories: FilterFactory[] = []; + const clusterHttpFilterOverrides = new Map(); + if (EXPERIMENTAL_FAULT_INJECTION) { + for (const [name, filter] of Object.entries(clusterWeight.typed_per_filter_config ?? {})) { + const parsedConfig = parseOverrideFilterConfig(filter); + if (parsedConfig) { + clusterHttpFilterOverrides.set(name, parsedConfig); + } + } + for (const filterConfig of this.ldsHttpFilterConfigs) { + if (clusterHttpFilterOverrides.has(filterConfig.name)) { + const filter = createHttpFilter(filterConfig.config, clusterHttpFilterOverrides.get(filterConfig.name)!); + if (filter) { + extraFilterFactories.push(filter); + } + } else if (routeHttpFilterOverrides.has(filterConfig.name)) { + const filter = createHttpFilter(filterConfig.config, routeHttpFilterOverrides.get(filterConfig.name)!); + if (filter) { + extraFilterFactories.push(filter); + } + } else if (virtualHostHttpFilterOverrides.has(filterConfig.name)) { + const filter = createHttpFilter(filterConfig.config, virtualHostHttpFilterOverrides.get(filterConfig.name)!); + if (filter) { + extraFilterFactories.push(filter); + } + } else { + const filter = createHttpFilter(filterConfig.config); + if (filter) { + extraFilterFactories.push(filter); + } + } + } + } + weightedClusters.push({name: clusterWeight.name, weight: clusterWeight.weight?.value ?? 0, dynamicFilterFactories: extraFilterFactories}); } - routeAction = new WeightedClusterRouteAction(weightedClusters, route.route!.weighted_clusters!.total_weight?.value ?? 100); + routeAction = new WeightedClusterRouteAction(weightedClusters, route.route!.weighted_clusters!.total_weight?.value ?? 100, {name: [], timeout: timeout, retryPolicy: retryPolicy}, hashPolicies); + break; } + default: + /* The validation logic should prevent us from reaching this point. + * This is just for the type checker. 
*/ + continue; } const routeMatcher = getPredicateForMatcher(route.match!); matchList.push({matcher: routeMatcher, action: routeAction}); @@ -336,27 +579,35 @@ class XdsResolver implements Resolver { this.clusterRefcounts.set(name, {inLastConfig: true, refCount: 0}); } } - const configSelector: ConfigSelector = (methodName, metadata) => { + const configSelector: ConfigSelector = (methodName, metadata, channelId) => { for (const {matcher, action} of matchList) { if (matcher.apply(methodName, metadata)) { - const clusterName = action.getCluster(); - this.refCluster(clusterName); + const clusterResult = action.getCluster(); + this.refCluster(clusterResult.name); const onCommitted = () => { - this.unrefCluster(clusterName); + this.unrefCluster(clusterResult.name); + } + let hash: string; + if (EXPERIMENTAL_RING_HASH) { + hash = `${action.getHash(metadata, channelId)}`; + } else { + hash = ''; } return { - methodConfig: {name: []}, + methodConfig: clusterResult.methodConfig, onCommitted: onCommitted, - pickInformation: {cluster: clusterName}, - status: status.OK + pickInformation: {cluster: clusterResult.name, hash: hash}, + status: status.OK, + dynamicFilterFactories: clusterResult.dynamicFilterFactories }; } } return { methodConfig: {name: []}, - // cluster won't be used here, but it's set because of some TypeScript weirdness - pickInformation: {cluster: ''}, - status: status.UNAVAILABLE + // These fields won't be used here, but they're set because of some TypeScript weirdness + pickInformation: {cluster: '', hash: ''}, + status: status.UNAVAILABLE, + dynamicFilterFactories: [] }; }; trace('Created ConfigSelector with configuration:'); @@ -364,16 +615,16 @@ class XdsResolver implements Resolver { trace(matcher.toString()); trace('=> ' + action.toString()); } - const clusterConfigMap = new Map(); + const clusterConfigMap: {[key: string]: {child_policy: LoadBalancingConfig[]}} = {}; for (const clusterName of this.clusterRefcounts.keys()) { - clusterConfigMap.set(clusterName, {child_policy: [new CdsLoadBalancingConfig(clusterName)]}); + clusterConfigMap[clusterName] = {child_policy: [{cds: {cluster: clusterName}}]}; } - const lbPolicyConfig = new XdsClusterManagerLoadBalancingConfig(clusterConfigMap); + const lbPolicyConfig = {xds_cluster_manager: {children: clusterConfigMap}}; const serviceConfig: ServiceConfig = { methodConfig: [], loadBalancingConfig: [lbPolicyConfig] } - this.listener.onSuccessfulResolution([], serviceConfig, null, configSelector, {}); + this.listener.onSuccessfulResolution([], serviceConfig, null, configSelector, {xdsClient: this.xdsClient}); } private reportResolutionError(reason: string) { @@ -386,19 +637,51 @@ class XdsResolver implements Resolver { }); } - updateResolution(): void { - // Wait until updateResolution is called once to start the xDS requests + private startResolution(): void { if (!this.isLdsWatcherActive) { trace('Starting resolution for target ' + uriToString(this.target)); - getSingletonXdsClient().addListenerWatcher(this.target.path, this.ldsWatcher); - this.isLdsWatcherActive = true; + try { + this.listenerResourceName = getListenerResourceName(this.bootstrapInfo!, this.target); + trace('Resolving target ' + uriToString(this.target) + ' with Listener resource name ' + this.listenerResourceName); + ListenerResourceType.startWatch(this.xdsClient, this.listenerResourceName, this.ldsWatcher); + this.isLdsWatcherActive = true; + + } catch (e) { + this.reportResolutionError((e as Error).message); + } + } + } + + updateResolution(): void { + if 
(EXPERIMENTAL_FEDERATION) { + if (this.bootstrapInfo) { + this.startResolution(); + } else { + try { + this.bootstrapInfo = loadBootstrapInfo(); + } catch (e) { + this.reportResolutionError((e as Error).message); + } + this.startResolution(); + } + } else { + if (!this.isLdsWatcherActive) { + trace('Starting resolution for target ' + uriToString(this.target)); + ListenerResourceType.startWatch(this.xdsClient, this.target.path, this.ldsWatcher); + this.listenerResourceName = this.target.path; + this.isLdsWatcherActive = true; + } } } destroy() { - getSingletonXdsClient().removeListenerWatcher(this.target.path, this.ldsWatcher); + if (this.listenerResourceName) { + ListenerResourceType.cancelWatch(this.xdsClient, this.listenerResourceName, this.ldsWatcher); + this.isLdsWatcherActive = false; + } if (this.latestRouteConfigName) { - getSingletonXdsClient().removeRouteWatcher(this.latestRouteConfigName, this.rdsWatcher); + RouteConfigurationResourceType.cancelWatch(this.xdsClient, this.latestRouteConfigName, this.rdsWatcher); + this.latestRouteConfigName = null; } } diff --git a/packages/grpc-js-xds/src/resources.ts b/packages/grpc-js-xds/src/resources.ts new file mode 100644 index 000000000..4542c5fd6 --- /dev/null +++ b/packages/grpc-js-xds/src/resources.ts @@ -0,0 +1,150 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import { URI } from 'vscode-uri'; +/* Since we are using an internal function from @grpc/proto-loader, we also + * need the top-level import to perform some setup operations. 
*/ +import '@grpc/proto-loader'; +// This is a non-public, unstable API, but it's very convenient +import { loadProtosWithOptionsSync } from '@grpc/proto-loader/build/src/util'; +import { Cluster__Output } from './generated/envoy/config/cluster/v3/Cluster'; +import { ClusterLoadAssignment__Output } from './generated/envoy/config/endpoint/v3/ClusterLoadAssignment'; +import { Listener__Output } from './generated/envoy/config/listener/v3/Listener'; +import { RouteConfiguration__Output } from './generated/envoy/config/route/v3/RouteConfiguration'; +import { ClusterConfig__Output } from './generated/envoy/extensions/clusters/aggregate/v3/ClusterConfig'; +import { HttpConnectionManager__Output } from './generated/envoy/extensions/filters/network/http_connection_manager/v3/HttpConnectionManager'; +import { EXPERIMENTAL_FEDERATION } from './environment'; + +export const EDS_TYPE_URL = 'type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment'; +export const CDS_TYPE_URL = 'type.googleapis.com/envoy.config.cluster.v3.Cluster'; +export const LDS_TYPE_URL = 'type.googleapis.com/envoy.config.listener.v3.Listener'; +export const RDS_TYPE_URL = 'type.googleapis.com/envoy.config.route.v3.RouteConfiguration'; + +export type EdsTypeUrl = 'type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment'; +export type CdsTypeUrl = 'type.googleapis.com/envoy.config.cluster.v3.Cluster'; +export type LdsTypeUrl = 'type.googleapis.com/envoy.config.listener.v3.Listener'; +export type RdsTypeUrl = 'type.googleapis.com/envoy.config.route.v3.RouteConfiguration'; + +export type AdsTypeUrl = EdsTypeUrl | CdsTypeUrl | RdsTypeUrl | LdsTypeUrl; + +export const HTTP_CONNECTION_MANGER_TYPE_URL = + 'type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager'; + +export type HttpConnectionManagerTypeUrl = 'type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager'; + +export const CLUSTER_CONFIG_TYPE_URL = 'type.googleapis.com/envoy.extensions.clusters.aggregate.v3.ClusterConfig'; + +export type ClusterConfigTypeUrl = 'type.googleapis.com/envoy.extensions.clusters.aggregate.v3.ClusterConfig'; + +/** + * Map type URLs to their corresponding message types + */ +export type AdsOutputType = T extends EdsTypeUrl + ? ClusterLoadAssignment__Output + : T extends CdsTypeUrl + ? Cluster__Output + : T extends RdsTypeUrl + ? RouteConfiguration__Output + : T extends LdsTypeUrl + ? Listener__Output + : T extends HttpConnectionManagerTypeUrl + ? 
HttpConnectionManager__Output + : ClusterConfig__Output; + + + +const resourceRoot = loadProtosWithOptionsSync([ + 'envoy/config/listener/v3/listener.proto', + 'envoy/config/route/v3/route.proto', + 'envoy/config/cluster/v3/cluster.proto', + 'envoy/config/endpoint/v3/endpoint.proto', + 'envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto', + 'envoy/extensions/clusters/aggregate/v3/cluster.proto'], { + keepCase: true, + includeDirs: [ + // Paths are relative to src/build + __dirname + '/../../deps/envoy-api/', + __dirname + '/../../deps/xds/', + __dirname + '/../../deps/googleapis/', + __dirname + '/../../deps/protoc-gen-validate/', + ], + } +); + +const toObjectOptions = { + longs: String, + enums: String, + defaults: true, + oneofs: true +} + +export function decodeSingleResource(targetTypeUrl: T, message: Buffer): AdsOutputType { + const name = targetTypeUrl.substring(targetTypeUrl.lastIndexOf('/') + 1); + const type = resourceRoot.lookup(name); + if (type) { + const decodedMessage = (type as any).decode(message); + return decodedMessage.$type.toObject(decodedMessage, toObjectOptions) as AdsOutputType; + } else { + throw new Error(`ADS Error: unknown resource type ${targetTypeUrl}`); + } +} + +export interface XdsResourceName { + authority: string; + key: string; +} + +function stripStringPrefix(value: string, prefix: string): string { + if (value.startsWith(prefix)) { + return value.substring(prefix.length); + } else { + return value; + } +} + +export function parseXdsResourceName(name: string, typeUrl: string): XdsResourceName { + if (!EXPERIMENTAL_FEDERATION || !name.startsWith('xdstp:')) { + return { + authority: 'old:', + key: name + }; + } + const uri = URI.parse(name); + const pathComponents = stripStringPrefix(uri.path, '/').split('/'); + if (pathComponents[0] !== typeUrl) { + throw new Error('xdstp URI path must indicate valid xDS resource type.'); + } + let queryString: string; + if (uri.query.length > 0) { + const queryParams = uri.query.split('&'); + queryParams.sort(); + queryString = '?' + queryParams.join('&'); + } else { + queryString = ''; + } + return { + authority: uri.authority, + key: `${pathComponents.slice(1).join('/')}${queryString}` + }; +} + +export function xdsResourceNameToString(name: XdsResourceName, typeUrl: string): string { + if (name.authority === 'old:') { + return name.key; + } + return `xdstp://${name.authority}/${typeUrl}/${name.key}`; +} diff --git a/packages/grpc-js-xds/src/route-action.ts b/packages/grpc-js-xds/src/route-action.ts index 4ba2b5908..2f87fee96 100644 --- a/packages/grpc-js-xds/src/route-action.ts +++ b/packages/grpc-js-xds/src/route-action.ts @@ -14,31 +14,139 @@ * limitations under the License. */ +import { Metadata, MethodConfig, experimental } from '@grpc/grpc-js'; +import Duration = experimental.Duration; +import Filter = experimental.Filter; +import FilterFactory = experimental.FilterFactory; +import { RE2 } from 're2-wasm'; +import { xxhashApi } from './xxhash'; + +export interface ClusterResult { + name: string; + methodConfig: MethodConfig; + dynamicFilterFactories: FilterFactory[]; +} + export interface RouteAction { toString(): string; - getCluster(): string; + getCluster(): ClusterResult; + getHash(metadata: Metadata, channelId: number): bigint; +} + +function durationToLogString(duration: Duration) { + const millis = Math.floor(duration.nanos / 1_000_000); + if (millis > 0) { + return duration.seconds + '.' 
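For illustration (resource names invented), the xdstp parsing above behaves roughly as follows when EXPERIMENTAL_FEDERATION is enabled.

parseXdsResourceName(
  'xdstp://xds.example.com/envoy.config.listener.v3.Listener/server/one?b=2&a=1',
  'envoy.config.listener.v3.Listener'
);
// => { authority: 'xds.example.com', key: 'server/one?a=1&b=2' }  (query params sorted)

parseXdsResourceName('backend.example.com:8080', 'envoy.config.listener.v3.Listener');
// => { authority: 'old:', key: 'backend.example.com:8080' }  (non-xdstp names keep the old style)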
+ millis; + } else { + return '' + duration.seconds; + } +} + +export interface HashPolicy { + type: 'HEADER' | 'CHANNEL_ID'; + terminal: boolean; + headerName?: string; + regex?: RE2; + regexSubstitution?: string; +} + +/** + * Must be called only after xxhash.loadXxhashApi() resolves. + * @param hashPolicies + * @param metadata + * @param channelId + */ +function getHash(hashPolicies: HashPolicy[], metadata: Metadata, channelId: number): bigint { + let hash: bigint | null = null; + for (const policy of hashPolicies) { + let newHash: bigint | null = null; + switch (policy.type) { + case 'CHANNEL_ID': + newHash = xxhashApi!.h64(`${channelId}`, 0n); + break; + case 'HEADER': { + if (!policy.headerName) { + break; + } + if (policy.headerName.endsWith('-bin')) { + break; + } + let headerString: string; + if (policy.headerName === 'content-type') { + headerString = 'application/grpc'; + } else { + const headerValues = metadata.get(policy.headerName); + if (headerValues.length === 0) { + break; + } + headerString = headerValues.join(','); + } + let rewrittenHeaderString = headerString; + if (policy.regex && policy.regexSubstitution) { + /* The JS string replace method uses $-prefixed patterns to produce + * other strings. See + * https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/replace#specifying_a_string_as_the_replacement + * RE2-based regex substitutions use \n where n is a number to refer + * to capture group n, and they otherwise have no special replacement + * patterns. See + * https://github.com/envoyproxy/envoy/blob/2443032526cf6e50d63d35770df9473dd0460fc0/api/envoy/type/matcher/v3/regex.proto#L79-L87 + * We convert an RE2 regex substitution into a string substitution by + * first replacing each "$" with "$$" (which produces "$" in the + * output), and then replace each "\n" for any whole number n with + * "$n". 
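 * For example, the RE2 substitution string "\1-\2" becomes the JS replacement
 * pattern "$1-$2", and a literal "$" in the substitution becomes "$$", which
 * String.prototype.replace emits as a single "$".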
*/ + const regexSubstitution = policy.regexSubstitution.replace(/\$/g, '$$$$').replace(/\\(\d+)/g, '$$$1'); + rewrittenHeaderString = headerString.replace(policy.regex, regexSubstitution); + } + newHash = xxhashApi!.h64(rewrittenHeaderString, 0n); + break; + } + } + if (hash === null) { + hash = newHash; + } else if (newHash !== null) { + hash = ((hash << 1n) | (hash >> 63n)) ^ newHash; + } + if (policy.terminal && hash !== null) { + break; + } + } + if (hash === null) { + return xxhashApi!.h64(`${Math.random()}`, 0n); + } else { + return hash; + } } export class SingleClusterRouteAction implements RouteAction { - constructor(private cluster: string) {} + constructor(private cluster: string, private methodConfig: MethodConfig, private extraFilterFactories: FilterFactory[], private hashPolicies: HashPolicy[]) {} getCluster() { - return this.cluster; + return { + name: this.cluster, + methodConfig: this.methodConfig, + dynamicFilterFactories: this.extraFilterFactories + }; + } + + getHash(metadata: Metadata, channelId: number): bigint { + return getHash(this.hashPolicies, metadata, channelId); } toString() { - return 'SingleCluster(' + this.cluster + ')'; + return 'SingleCluster(' + this.cluster + ', ' + JSON.stringify(this.methodConfig) + ')'; } } export interface WeightedCluster { name: string; weight: number; + dynamicFilterFactories: FilterFactory[]; } interface ClusterChoice { name: string; numerator: number; + dynamicFilterFactories: FilterFactory[]; } export class WeightedClusterRouteAction implements RouteAction { @@ -46,12 +154,12 @@ export class WeightedClusterRouteAction implements RouteAction { * The weighted cluster choices represented as a CDF */ private clusterChoices: ClusterChoice[]; - constructor(private clusters: WeightedCluster[], private totalWeight: number) { + constructor(private clusters: WeightedCluster[], private totalWeight: number, private methodConfig: MethodConfig, private hashPolicies: HashPolicy[]) { this.clusterChoices = []; let lastNumerator = 0; for (const clusterWeight of clusters) { lastNumerator += clusterWeight.weight; - this.clusterChoices.push({name: clusterWeight.name, numerator: lastNumerator}); + this.clusterChoices.push({name: clusterWeight.name, numerator: lastNumerator, dynamicFilterFactories: clusterWeight.dynamicFilterFactories}); } } @@ -59,14 +167,23 @@ export class WeightedClusterRouteAction implements RouteAction { const randomNumber = Math.random() * this.totalWeight; for (const choice of this.clusterChoices) { if (randomNumber < choice.numerator) { - return choice.name; + return { + name: choice.name, + methodConfig: this.methodConfig, + dynamicFilterFactories: choice.dynamicFilterFactories + }; } } // This should be prevented by the validation rules - return ''; + return {name: '', methodConfig: this.methodConfig, dynamicFilterFactories: []}; + } + + getHash(metadata: Metadata, channelId: number): bigint { + return getHash(this.hashPolicies, metadata, channelId); } toString() { - return 'WeightedCluster(' + this.clusters.map(({name, weight}) => '(' + name + ':' + weight + ')').join(', ') + ')'; + const clusterListString = this.clusters.map(({name, weight}) => '(' + name + ':' + weight + ')').join(', ') + return 'WeightedCluster(' + clusterListString + ', ' + JSON.stringify(this.methodConfig) + ')'; } -} \ No newline at end of file +} diff --git a/packages/grpc-js-xds/src/xds-bootstrap.ts b/packages/grpc-js-xds/src/xds-bootstrap.ts index 00e13d09f..536439dd0 100644 --- a/packages/grpc-js-xds/src/xds-bootstrap.ts +++ 
b/packages/grpc-js-xds/src/xds-bootstrap.ts @@ -16,25 +16,72 @@ */ import * as fs from 'fs'; +import { EXPERIMENTAL_FEDERATION } from './environment'; import { Struct } from './generated/google/protobuf/Struct'; -import { Node } from './generated/envoy/api/v2/core/Node'; import { Value } from './generated/google/protobuf/Value'; /* eslint-disable @typescript-eslint/no-explicit-any */ +export interface Locality { + region?: string; + zone?: string; + sub_zone?: string; +} + +export interface Node { + id: string, + locality: Locality; + cluster?: string; + metadata?: Struct; +} + export interface ChannelCredsConfig { type: string; config?: object; } export interface XdsServerConfig { - serverUri: string; - channelCreds: ChannelCredsConfig[]; + server_uri: string; + channel_creds: ChannelCredsConfig[]; + server_features: string[]; +} + +export interface Authority { + clientListenerResourceNameTemplate: string; + xdsServers?: XdsServerConfig[]; } export interface BootstrapInfo { xdsServers: XdsServerConfig[]; node: Node; + authorities: {[authorityName: string]: Authority}; + clientDefaultListenerResourceNameTemplate: string; +} + +const KNOWN_SERVER_FEATURES = ['ignore_resource_deletion']; + +export function serverConfigEqual(config1: XdsServerConfig, config2: XdsServerConfig): boolean { + if (config1.server_uri !== config2.server_uri) { + return false; + } + for (const feature of KNOWN_SERVER_FEATURES) { + if ((feature in config1.server_features) !== (feature in config2.server_features)) { + return false; + } + } + if (config1.channel_creds.length !== config2.channel_creds.length) { + return false; + } + for (const [index, creds1] of config1.channel_creds.entries()) { + const creds2 = config2.channel_creds[index]; + if (creds1.type !== creds2.type) { + return false; + } + if (JSON.stringify(creds1) !== JSON.stringify(creds2)) { + return false; + } + } + return true; } function validateChannelCredsConfig(obj: any): ChannelCredsConfig { @@ -46,7 +93,7 @@ function validateChannelCredsConfig(obj: any): ChannelCredsConfig { `xds_servers.channel_creds.type field: expected string, got ${typeof obj.type}` ); } - if ('config' in obj) { + if ('config' in obj && obj.config !== undefined) { if (typeof obj.config !== 'object' || obj.config === null) { throw new Error( 'xds_servers.channel_creds config field must be an object if provided' @@ -59,7 +106,15 @@ function validateChannelCredsConfig(obj: any): ChannelCredsConfig { }; } -function validateXdsServerConfig(obj: any): XdsServerConfig { +const SUPPORTED_CHANNEL_CREDS_TYPES = [ + 'google_default', + 'insecure' +]; + +export function validateXdsServerConfig(obj: any): XdsServerConfig { + if (!(typeof obj === 'object' && obj !== null)) { + throw new Error('xDS server config must be an object'); + } if (!('server_uri' in obj)) { throw new Error('server_uri field missing in xds_servers element'); } @@ -76,14 +131,33 @@ function validateXdsServerConfig(obj: any): XdsServerConfig { `xds_servers.channel_creds field: expected array, got ${typeof obj.channel_creds}` ); } - if (obj.channel_creds.length === 0) { + let foundSupported = false; + for (const cred of obj.channel_creds) { + if (SUPPORTED_CHANNEL_CREDS_TYPES.includes(cred.type)) { + foundSupported = true; + } + } + if (!foundSupported) { throw new Error( - 'xds_servers.channel_creds field: at least one entry is required' + `xds_servers.channel_creds field: must contain at least one entry with a type in [${SUPPORTED_CHANNEL_CREDS_TYPES}]` ); } + if ('server_features' in obj) { + if 
(!Array.isArray(obj.server_features)) { + throw new Error( + `xds_servers.server_features field: expected array, got ${typeof obj.server_features}` + ); + } + for (const feature of obj.server_features) { + if (typeof feature !== 'string') { + `xds_servers.server_features field element: expected string, got ${typeof feature}` + } + } + } return { - serverUri: obj.server_uri, - channelCreds: obj.channel_creds.map(validateChannelCredsConfig), + server_uri: obj.server_uri, + channel_creds: obj.channel_creds.map(validateChannelCredsConfig), + server_features: obj.server_features ?? [] }; } @@ -149,7 +223,10 @@ function getStructFromJson(obj: any): Struct { * @param obj */ function validateNode(obj: any): Node { - const result: Node = {}; + const result: Node = { + id: '', + locality: {} + }; if (!('id' in obj)) { throw new Error('id field missing in node element'); } @@ -202,47 +279,119 @@ function validateNode(obj: any): Node { return result; } -function validateBootstrapFile(obj: any): BootstrapInfo { +function validateAuthority(obj: any, authorityName: string): Authority { + if ('client_listener_resource_name_template' in obj) { + if (typeof obj.client_listener_resource_name_template !== 'string') { + throw new Error(`authorities[${authorityName}].client_listener_resource_name_template: expected string, got ${typeof obj.client_listener_resource_name_template}`); + } + if (!obj.client_listener_resource_name_template.startsWith(`xdstp://${authorityName}/`)) { + throw new Error(`authorities[${authorityName}].client_listener_resource_name_template must start with "xdstp://${authorityName}/"`); + } + } return { - xdsServers: obj.xds_servers.map(validateXdsServerConfig), - node: validateNode(obj.node), + clientListenerResourceNameTemplate: obj.client_listener_resource_name_template ?? `xdstp://${authorityName}/envoy.config.listener.v3.Listener/%s`, + xdsServers: obj.xds_servers?.map(validateXdsServerConfig) }; } -let loadedBootstrapInfo: Promise | null = null; +function validateAuthoritiesMap(obj: any): {[authorityName: string]: Authority} { + if (!obj) { + return {}; + } + const result: {[authorityName: string]: Authority} = {}; + for (const [name, authority] of Object.entries(obj)) { + result[name] = validateAuthority(authority, name); + } + return result; +} + +export function validateBootstrapConfig(obj: any): BootstrapInfo { + const xdsServers = obj.xds_servers.map(validateXdsServerConfig); + const node = validateNode(obj.node); + if (EXPERIMENTAL_FEDERATION) { + if ('client_default_listener_resource_name_template' in obj) { + if (typeof obj.client_default_listener_resource_name_template !== 'string') { + throw new Error(`client_default_listener_resource_name_template: expected string, got ${typeof obj.client_default_listener_resource_name_template}`); + } + } + return { + xdsServers: xdsServers, + node: node, + authorities: validateAuthoritiesMap(obj.authorities), + clientDefaultListenerResourceNameTemplate: obj.client_default_listener_resource_name_template ?? '%s' + }; + } else { + return { + xdsServers: xdsServers, + node: node, + authorities: {}, + clientDefaultListenerResourceNameTemplate: '%s' + }; + } +} + +let loadedBootstrapInfo: BootstrapInfo | null = null; -export async function loadBootstrapInfo(): Promise { +/** + * Load the bootstrap information from the location determined by the + * GRPC_XDS_BOOTSTRAP environment variable, or if that is unset, from the + * GRPC_XDS_BOOTSTRAP_CONFIG environment variable. 
The value is cached, so any + * calls after the first will just return the cached value. + * @returns + */ +export function loadBootstrapInfo(): BootstrapInfo { if (loadedBootstrapInfo !== null) { return loadedBootstrapInfo; } + + /** + * If GRPC_XDS_BOOTSTRAP exists + * then use its value as the name of the bootstrap file. + * + * If the file is missing or the contents of the file are malformed, + * return an error. + */ const bootstrapPath = process.env.GRPC_XDS_BOOTSTRAP; - if (bootstrapPath === undefined) { - return Promise.reject( - new Error( - 'The GRPC_XDS_BOOTSTRAP environment variable needs to be set to the path to the bootstrap file to use xDS' - ) - ); + if (bootstrapPath) { + let rawBootstrap: string; + try { + rawBootstrap = fs.readFileSync(bootstrapPath, { encoding: 'utf8'}); + } catch (e) { + throw new Error(`Failed to read xDS bootstrap file from path ${bootstrapPath} with error ${(e as Error).message}`); + } + try { + const parsedFile = JSON.parse(rawBootstrap); + loadedBootstrapInfo = validateBootstrapConfig(parsedFile); + return loadedBootstrapInfo; + } catch (e) { + throw new Error(`Failed to parse xDS bootstrap file at path ${bootstrapPath} with error ${(e as Error).message}`) + } } - loadedBootstrapInfo = new Promise((resolve, reject) => { - fs.readFile(bootstrapPath, { encoding: 'utf8' }, (err, data) => { - if (err) { - reject( - new Error( - `Failed to read xDS bootstrap file from path ${bootstrapPath} with error ${err.message}` - ) - ); - } - try { - const parsedFile = JSON.parse(data); - resolve(validateBootstrapFile(parsedFile)); - } catch (e) { - reject( - new Error( - `Failed to parse xDS bootstrap file at path ${bootstrapPath} with error ${e.message}` - ) - ); - } - }); - }); - return loadedBootstrapInfo; + + /** + * Else, if GRPC_XDS_BOOTSTRAP_CONFIG exists + * then use its value as the bootstrap config. + * + * If the value is malformed, return an error. + * + * See: https://github.com/grpc/grpc-node/issues/1868 + */ + const bootstrapConfig = process.env.GRPC_XDS_BOOTSTRAP_CONFIG; + if (bootstrapConfig) { + try { + const parsedConfig = JSON.parse(bootstrapConfig); + loadedBootstrapInfo = validateBootstrapConfig(parsedConfig); + } catch (e) { + throw new Error( + `Failed to parse xDS bootstrap config from environment variable GRPC_XDS_BOOTSTRAP_CONFIG with error ${(e as Error).message}` + ); + } + + return loadedBootstrapInfo; + } + + + throw new Error( + 'The GRPC_XDS_BOOTSTRAP or GRPC_XDS_BOOTSTRAP_CONFIG environment variables need to be set to the path to the bootstrap file to use xDS' + ); } diff --git a/packages/grpc-js-xds/src/xds-client.ts b/packages/grpc-js-xds/src/xds-client.ts index 22d816a03..540d946c1 100644 --- a/packages/grpc-js-xds/src/xds-client.ts +++ b/packages/grpc-js-xds/src/xds-client.ts @@ -1,5 +1,5 @@ /* - * Copyright 2020 gRPC authors. + * Copyright 2023 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
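
/* Illustrative sketch (not part of the patch): a minimal bootstrap config in the
 * snake_case shape accepted by validateBootstrapConfig above. The server URI,
 * node id, and locality values are placeholders. The config can be supplied as a
 * file pointed to by GRPC_XDS_BOOTSTRAP, or inline through
 * GRPC_XDS_BOOTSTRAP_CONFIG (the file takes precedence if both are set). */
const exampleBootstrap = {
  xds_servers: [{
    server_uri: 'xds-server.example.com:443',      // placeholder
    channel_creds: [{ type: 'google_default' }],   // or { type: 'insecure' }
    server_features: ['ignore_resource_deletion']
  }],
  node: {
    id: 'example-node-id',
    locality: { region: 'example-region', zone: 'example-zone' }
  }
};

// Inline variant, e.g. in tests:
process.env.GRPC_XDS_BOOTSTRAP_CONFIG = JSON.stringify(exampleBootstrap);
const bootstrapInfo = loadBootstrapInfo();
// bootstrapInfo.xdsServers[0].server_uri === 'xds-server.example.com:443'
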
@@ -15,46 +15,26 @@ * */ -import * as protoLoader from '@grpc/proto-loader'; -import { loadPackageDefinition, StatusObject, status, logVerbosity, Metadata, experimental, ChannelOptions, ClientDuplexStream, ServiceError, ChannelCredentials } from '@grpc/grpc-js'; +import { Channel, ChannelCredentials, ClientDuplexStream, Metadata, StatusObject, connectivityState, experimental, loadPackageDefinition, logVerbosity, status } from "@grpc/grpc-js"; +import { XdsDecodeContext, XdsDecodeResult, XdsResourceType } from "./xds-resource-type/xds-resource-type"; +import { XdsResourceName, parseXdsResourceName, xdsResourceNameToString } from "./resources"; +import { Node } from "./generated/envoy/config/core/v3/Node"; +import { BootstrapInfo, XdsServerConfig, loadBootstrapInfo, serverConfigEqual } from "./xds-bootstrap"; +import BackoffTimeout = experimental.BackoffTimeout; +import { DiscoveryRequest } from "./generated/envoy/service/discovery/v3/DiscoveryRequest"; +import { DiscoveryResponse__Output } from "./generated/envoy/service/discovery/v3/DiscoveryResponse"; import * as adsTypes from './generated/ads'; import * as lrsTypes from './generated/lrs'; -import { loadBootstrapInfo } from './xds-bootstrap'; -import { isIPv4, isIPv6 } from 'net'; -import { Node } from './generated/envoy/api/v2/core/Node'; -import { AggregatedDiscoveryServiceClient } from './generated/envoy/service/discovery/v2/AggregatedDiscoveryService'; -import { DiscoveryRequest } from './generated/envoy/api/v2/DiscoveryRequest'; -import { DiscoveryResponse__Output } from './generated/envoy/api/v2/DiscoveryResponse'; -import { - ClusterLoadAssignment__Output, - ClusterLoadAssignment, -} from './generated/envoy/api/v2/ClusterLoadAssignment'; -import { Cluster__Output } from './generated/envoy/api/v2/Cluster'; -import { LoadReportingServiceClient } from './generated/envoy/service/load_stats/v2/LoadReportingService'; -import { LoadStatsRequest } from './generated/envoy/service/load_stats/v2/LoadStatsRequest'; -import { LoadStatsResponse__Output } from './generated/envoy/service/load_stats/v2/LoadStatsResponse'; -import { - Locality__Output, - Locality, -} from './generated/envoy/api/v2/core/Locality'; -import { - ClusterStats, - _envoy_api_v2_endpoint_ClusterStats_DroppedRequests, -} from './generated/envoy/api/v2/endpoint/ClusterStats'; -import { UpstreamLocalityStats } from './generated/envoy/api/v2/endpoint/UpstreamLocalityStats'; -import { Listener__Output } from './generated/envoy/api/v2/Listener'; -import { HttpConnectionManager__Output } from './generated/envoy/config/filter/network/http_connection_manager/v2/HttpConnectionManager'; -import { RouteConfiguration__Output } from './generated/envoy/api/v2/RouteConfiguration'; -import { Any__Output } from './generated/google/protobuf/Any'; -import BackoffTimeout = experimental.BackoffTimeout; -import ServiceConfig = experimental.ServiceConfig; -import { createGoogleDefaultCredentials } from './google-default-credentials'; -import { CdsLoadBalancingConfig } from './load-balancer-cds'; -import { EdsState } from './xds-stream-state/eds-state'; -import { CdsState } from './xds-stream-state/cds-state'; -import { RdsState } from './xds-stream-state/rds-state'; -import { LdsState } from './xds-stream-state/lds-state'; -import { Watcher } from './xds-stream-state/xds-stream-state'; +import * as protoLoader from '@grpc/proto-loader'; +import { AggregatedDiscoveryServiceClient } from "./generated/envoy/service/discovery/v3/AggregatedDiscoveryService"; +import { LoadReportingServiceClient } from 
"./generated/envoy/service/load_stats/v3/LoadReportingService"; +import { createGoogleDefaultCredentials } from "./google-default-credentials"; +import { Any__Output } from "./generated/google/protobuf/Any"; +import { LoadStatsRequest } from "./generated/envoy/service/load_stats/v3/LoadStatsRequest"; +import { LoadStatsResponse__Output } from "./generated/envoy/service/load_stats/v3/LoadStatsResponse"; +import { Locality, Locality__Output } from "./generated/envoy/config/core/v3/Locality"; +import { Duration } from "./generated/google/protobuf/Duration"; +import { registerXdsClientWithCsds } from "./csds"; const TRACER_NAME = 'xds_client'; @@ -62,43 +42,17 @@ function trace(text: string): void { experimental.trace(logVerbosity.DEBUG, TRACER_NAME, text); } -const clientVersion = require('../../package.json').version; - -const EDS_TYPE_URL = 'type.googleapis.com/envoy.api.v2.ClusterLoadAssignment'; -const CDS_TYPE_URL = 'type.googleapis.com/envoy.api.v2.Cluster'; -const LDS_TYPE_URL = 'type.googleapis.com/envoy.api.v2.Listener'; -const RDS_TYPE_URL = 'type.googleapis.com/envoy.api.v2.RouteConfiguration'; - -type EdsTypeUrl = 'type.googleapis.com/envoy.api.v2.ClusterLoadAssignment'; -type CdsTypeUrl = 'type.googleapis.com/envoy.api.v2.Cluster'; -type LdsTypeUrl = 'type.googleapis.com/envoy.api.v2.Listener'; -type RdsTypeUrl = 'type.googleapis.com/envoy.api.v2.RouteConfiguration'; - -type AdsTypeUrl = EdsTypeUrl | CdsTypeUrl | RdsTypeUrl | LdsTypeUrl; +let loadedProtos: adsTypes.ProtoGrpcType & lrsTypes.ProtoGrpcType | null = null; -const HTTP_CONNECTION_MANGER_TYPE_URL = - 'type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager'; - -let loadedProtos: Promise< - adsTypes.ProtoGrpcType & lrsTypes.ProtoGrpcType -> | null = null; - -function loadAdsProtos(): Promise< - adsTypes.ProtoGrpcType & lrsTypes.ProtoGrpcType -> { +function loadAdsProtos(): adsTypes.ProtoGrpcType & lrsTypes.ProtoGrpcType { if (loadedProtos !== null) { return loadedProtos; } - loadedProtos = protoLoader - .load( + return (loadPackageDefinition(protoLoader + .loadSync( [ - 'envoy/service/discovery/v2/ads.proto', - 'envoy/service/load_stats/v2/lrs.proto', - 'envoy/api/v2/listener.proto', - 'envoy/api/v2/route.proto', - 'envoy/api/v2/cluster.proto', - 'envoy/api/v2/endpoint.proto', - 'envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto', + 'envoy/service/discovery/v3/ads.proto', + 'envoy/service/load_stats/v3/lrs.proto', ], { keepCase: true, @@ -110,21 +64,452 @@ function loadAdsProtos(): Promise< includeDirs: [ // Paths are relative to src/build __dirname + '/../../deps/envoy-api/', - __dirname + '/../../deps/udpa/', + __dirname + '/../../deps/xds/', __dirname + '/../../deps/googleapis/', __dirname + '/../../deps/protoc-gen-validate/', ], } - ) - .then( - (packageDefinition) => - (loadPackageDefinition( - packageDefinition - ) as unknown) as adsTypes.ProtoGrpcType & lrsTypes.ProtoGrpcType + )) as unknown) as adsTypes.ProtoGrpcType & lrsTypes.ProtoGrpcType; +} + +const clientVersion = require('../../package.json').version; + +export interface ResourceWatcherInterface { + onGenericResourceChanged(resource: object): void; + onError(status: StatusObject): void; + onResourceDoesNotExist(): void; +} + +export interface BasicWatcher { + onResourceChanged(resource: UpdateType): void; + onError(status: StatusObject): void; + onResourceDoesNotExist(): void; +} + +export class Watcher implements ResourceWatcherInterface { + constructor(private internalWatcher: 
BasicWatcher) {} + onGenericResourceChanged(resource: object): void { + this.internalWatcher.onResourceChanged(resource as unknown as UpdateType); + } + onError(status: StatusObject) { + this.internalWatcher.onError(status); + } + onResourceDoesNotExist() { + this.internalWatcher.onResourceDoesNotExist(); + } +} + +const RESOURCE_TIMEOUT_MS = 15_000; + +class ResourceTimer { + private timer: NodeJS.Timer | null = null; + private resourceSeen = false; + constructor(private callState: AdsCallState, private type: XdsResourceType, private name: XdsResourceName) {} + + maybeCancelTimer() { + if (this.timer) { + clearTimeout(this.timer); + this.timer = null; + } + } + + markSeen() { + this.resourceSeen = true; + this.maybeCancelTimer(); + } + + markAdsStreamStarted() { + this.maybeStartTimer(); + } + + private maybeStartTimer() { + if (this.resourceSeen) { + return; + } + if (this.timer) { + return; + } + const authorityState = this.callState.client.xdsClient.authorityStateMap.get(this.name.authority); + if (!authorityState) { + return; + } + const resourceState = authorityState.resourceMap.get(this.type)?.get(this.name.key); + if (resourceState?.cachedResource) { + return; + } + this.timer = setTimeout(() => { + this.onTimer(); + }, RESOURCE_TIMEOUT_MS); + } + + private onTimer() { + const authorityState = this.callState.client.xdsClient.authorityStateMap.get(this.name.authority); + const resourceState = authorityState?.resourceMap.get(this.type)?.get(this.name.key); + if (!resourceState) { + return; + } + resourceState.meta.clientStatus = 'DOES_NOT_EXIST'; + for (const watcher of resourceState.watchers) { + watcher.onResourceDoesNotExist(); + } + } +} + +interface AdsParseResult { + type?: XdsResourceType; + typeUrl?: string; + version?: string; + nonce?: string; + errors: string[]; + /** + * authority -> set of keys + */ + resourcesSeen: Map>; + haveValidResources: boolean; +} + +/** + * Responsible for parsing a single ADS response, one resource at a time + */ +class AdsResponseParser { + private result: AdsParseResult = { + errors: [], + resourcesSeen: new Map(), + haveValidResources: false + }; + private updateTime = new Date(); + + constructor(private adsCallState: AdsCallState) {} + + processAdsResponseFields(message: DiscoveryResponse__Output) { + const type = this.adsCallState.client.xdsClient.getResourceType(message.type_url); + if (!type) { + throw new Error(`Unexpected type URL ${message.type_url}`); + } + this.result.type = type; + this.result.typeUrl = message.type_url; + this.result.nonce = message.nonce; + this.result.version = message.version_info; + } + + parseResource(index: number, resource: Any__Output) { + const errorPrefix = `resource index ${index}:`; + if (resource.type_url !== this.result.typeUrl) { + this.result.errors.push(`${errorPrefix} incorrect resource type "${resource.type_url}" (should be "${this.result.typeUrl}")`); + return; + } + if (!this.result.type) { + return; + } + const decodeContext: XdsDecodeContext = { + server: this.adsCallState.client.xdsServerConfig + }; + let decodeResult: XdsDecodeResult; + try { + decodeResult = this.result.type.decode(decodeContext, resource); + } catch (e) { + this.result.errors.push(`${errorPrefix} ${(e as Error).message}`); + return; + } + let parsedName: XdsResourceName; + try { + parsedName = parseXdsResourceName(decodeResult.name, this.result.type!.getTypeUrl()); + } catch (e) { + this.result.errors.push(`${errorPrefix} ${(e as Error).message}`); + return; + } + 
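
/* Illustrative sketch (not part of the patch): wrapping a strongly typed
 * BasicWatcher in the generic Watcher adapter defined above. "ClusterUpdate" is
 * a placeholder for whatever update type a concrete resource type produces. */
interface ClusterUpdate { name: string; }

const typedWatcher: BasicWatcher<ClusterUpdate> = {
  onResourceChanged: (update) => console.log('cluster updated:', update.name),
  onError: (statusObject) => console.log('stream error:', statusObject.details),
  onResourceDoesNotExist: () => console.log('cluster resource no longer exists'),
};

// The Watcher adapter forwards untyped resources from the XdsClient to the
// typed watcher, casting them to ClusterUpdate.
const genericWatcher: ResourceWatcherInterface = new Watcher(typedWatcher);
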
this.adsCallState.typeStates.get(this.result.type!)?.subscribedResources.get(parsedName.authority)?.get(parsedName.key)?.markSeen(); + if (this.result.type.allResourcesRequiredInSotW()) { + if (!this.result.resourcesSeen.has(parsedName.authority)) { + this.result.resourcesSeen.set(parsedName.authority, new Set()); + } + this.result.resourcesSeen.get(parsedName.authority)!.add(parsedName.key); + } + const resourceState = this.adsCallState.client.xdsClient.authorityStateMap.get(parsedName.authority)?.resourceMap.get(this.result.type)?.get(parsedName.key); + if (!resourceState) { + // No subscription for this resource + return; + } + if (resourceState.deletionIgnored) { + experimental.log(logVerbosity.INFO, `Received resource with previously ignored deletion: ${decodeResult.name}`); + resourceState.deletionIgnored = false; + } + if (decodeResult.error) { + this.result.errors.push(`${errorPrefix} ${decodeResult.error}`); + process.nextTick(() => { + for (const watcher of resourceState.watchers) { + watcher.onError({code: status.UNAVAILABLE, details: decodeResult.error!, metadata: new Metadata()}); + } + }); + resourceState.meta.clientStatus = 'NACKED'; + resourceState.meta.failedVersion = this.result.version!; + resourceState.meta.failedDetails = decodeResult.error; + resourceState.meta.failedUpdateTime = this.updateTime; + return; + } + if (!decodeResult.value) { + return; + } + this.adsCallState.client.trace('Parsed resource of type ' + this.result.type.getTypeUrl() + ': ' + JSON.stringify(decodeResult.value, (key, value) => (value && value.type === 'Buffer' && Array.isArray(value.data)) ? (value.data as Number[]).map(n => n.toString(16)).join('') : value, 2)); + this.result.haveValidResources = true; + if (this.result.type.resourcesEqual(resourceState.cachedResource, decodeResult.value)) { + return; + } + resourceState.cachedResource = decodeResult.value; + resourceState.meta = { + clientStatus: 'ACKED', + rawResource: resource, + updateTime: this.updateTime, + version: this.result.version! 
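
/* Illustrative sketch (not part of the patch): how the JSON.stringify replacer in
 * the trace call above renders Buffer-valued fields as hex. Node's Buffer
 * serializes via toJSON() as { type: 'Buffer', data: number[] }, so the replacer
 * detects that shape and joins the bytes as hex digits instead. */
const bufferToHexReplacer = (key: string, value: any) =>
  (value && value.type === 'Buffer' && Array.isArray(value.data))
    ? value.data.map((n: number) => n.toString(16)).join('')
    : value;

const parsedResource = { name: 'example.com', hash: Buffer.from([0xde, 0xad, 0xbe, 0xef]) };
console.log(JSON.stringify(parsedResource, bufferToHexReplacer, 2));
// "hash" is printed as "deadbeef" rather than an array of byte values.
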
+ }; + process.nextTick(() => { + for (const watcher of resourceState.watchers) { + watcher.onGenericResourceChanged(decodeResult.value!); + } + }); + } + + getResult() { + return this.result; + } +} + +type AdsCall = ClientDuplexStream; + +interface ResourceTypeState { + nonce?: string; + error?: string; + /** + * authority -> key -> timer + */ + subscribedResources: Map>; +} + +class AdsCallState { + public typeStates: Map = new Map(); + private receivedAnyResponse = false; + private sentInitialMessage = false; + constructor(public client: XdsSingleServerClient, private call: AdsCall, private node: Node) { + // Populate subscription map with existing subscriptions + for (const [authority, authorityState] of client.xdsClient.authorityStateMap) { + if (authorityState.client !== client) { + continue; + } + for (const [type, typeMap] of authorityState.resourceMap) { + for (const key of typeMap.keys()) { + this.subscribe(type, {authority, key}, true); + } + } + } + for (const type of this.typeStates.keys()) { + this.updateNames(type); + } + call.on('data', (message: DiscoveryResponse__Output) => { + this.handleResponseMessage(message); + }) + call.on('status', (status: StatusObject) => { + this.handleStreamStatus(status); + }); + call.on('error', () => {}); + } + + private trace(text: string) { + this.client.trace(text); + } + + private handleResponseMessage(message: DiscoveryResponse__Output) { + const parser = new AdsResponseParser(this); + let handledAdsResponseFields: boolean; + try { + parser.processAdsResponseFields(message); + handledAdsResponseFields = true; + } catch (e) { + this.trace('ADS response field parsing failed for type ' + message.type_url); + handledAdsResponseFields = false; + } + if (handledAdsResponseFields) { + for (const [index, resource] of message.resources.entries()) { + parser.parseResource(index, resource); + } + const result = parser.getResult(); + const typeState = this.typeStates.get(result.type!); + if (!typeState) { + this.trace('Type state not found for type ' + result.type!.getTypeUrl()); + return; + } + typeState.nonce = result.nonce; + if (result.errors.length > 0) { + typeState.error = `xDS response validation errors: [${result.errors.join('; ')}]`; + } else { + delete typeState.error; + } + // Delete resources not seen in update if needed + if (result.type!.allResourcesRequiredInSotW()) { + for (const [authority, authorityState] of this.client.xdsClient.authorityStateMap) { + if (authorityState.client !== this.client) { + continue; + } + const typeMap = authorityState.resourceMap.get(result.type!); + if (!typeMap) { + continue; + } + for (const [key, resourceState] of typeMap) { + if (!result.resourcesSeen.get(authority)?.has(key)) { + /* Do nothing for resources that have no cached value. Those are + * handled by the resource timer. 
*/ + if (!resourceState.cachedResource) { + continue; + } + if (this.client.ignoreResourceDeletion) { + experimental.log(logVerbosity.ERROR, 'Ignoring nonexistent resource ' + xdsResourceNameToString({authority, key}, result.type!.getTypeUrl())); + resourceState.deletionIgnored = true; + } else { + resourceState.meta.clientStatus = 'DOES_NOT_EXIST'; + process.nextTick(() => { + for (const watcher of resourceState.watchers) { + watcher.onResourceDoesNotExist(); + } + }); + } + } + } + } + } + if (result.haveValidResources || result.errors.length === 0) { + this.client.resourceTypeVersionMap.set(result.type!, result.version!); + } + this.updateNames(result.type!); + } + } + + private* allWatchers() { + for (const [type, typeState] of this.typeStates) { + for (const [authority, authorityMap] of typeState.subscribedResources) { + for (const key of authorityMap.keys()) { + yield* this.client.xdsClient.authorityStateMap.get(authority)?.resourceMap.get(type)?.get(key)?.watchers ?? []; + } + } + } + } + + private handleStreamStatus(streamStatus: StatusObject) { + this.trace( + 'ADS stream ended. code=' + streamStatus.code + ' details= ' + streamStatus.details ); - return loadedProtos; + if (streamStatus.code !== status.OK && !this.receivedAnyResponse) { + for (const watcher of this.allWatchers()) { + watcher.onError(streamStatus); + } + } + this.client.handleAdsStreamEnd(); + } + + hasSubscribedResources(): boolean { + for (const typeState of this.typeStates.values()) { + for (const authorityMap of typeState.subscribedResources.values()) { + if (authorityMap.size > 0) { + return true; + } + } + } + return false; + } + + subscribe(type: XdsResourceType, name: XdsResourceName, delaySend: boolean = false) { + let typeState = this.typeStates.get(type); + if (!typeState) { + typeState = { + nonce: '', + subscribedResources: new Map() + }; + this.typeStates.set(type, typeState); + } + let authorityMap = typeState.subscribedResources.get(name.authority); + if (!authorityMap) { + authorityMap = new Map(); + typeState.subscribedResources.set(name.authority, authorityMap); + } + if (!authorityMap.has(name.key)) { + const timer = new ResourceTimer(this, type, name); + authorityMap.set(name.key, timer); + if (!delaySend) { + this.updateNames(type); + } + } + } + + unsubscribe(type: XdsResourceType, name: XdsResourceName) { + const typeState = this.typeStates.get(type); + if (!typeState) { + return; + } + const authorityMap = typeState.subscribedResources.get(name.authority); + if (!authorityMap) { + return; + } + authorityMap.delete(name.key); + if (authorityMap.size === 0) { + typeState.subscribedResources.delete(name.authority); + } + if (typeState.subscribedResources.size === 0) { + this.typeStates.delete(type); + } + this.updateNames(type); + } + + resourceNamesForRequest(type: XdsResourceType): string[] { + const typeState = this.typeStates.get(type); + if (!typeState) { + return []; + } + const result: string[] = []; + for (const [authority, authorityMap] of typeState.subscribedResources) { + for (const [key, timer] of authorityMap) { + result.push(xdsResourceNameToString({authority, key}, type.getTypeUrl())); + } + } + return result; + } + + updateNames(type: XdsResourceType) { + const typeState = this.typeStates.get(type); + if (!typeState) { + return; + } + const request: DiscoveryRequest = { + node: this.sentInitialMessage ? 
null : this.node, + type_url: type.getFullTypeUrl(), + response_nonce: typeState.nonce, + resource_names: this.resourceNamesForRequest(type), + version_info: this.client.resourceTypeVersionMap.get(type), + error_detail: typeState.error ? { code: status.UNAVAILABLE, message: typeState.error} : null + }; + this.trace('Sending discovery request: ' + JSON.stringify(request, undefined, 2)); + this.call.write(request); + this.sentInitialMessage = true; + } + + end() { + this.call.end(); + } + + /** + * Should be called when the channel state is READY after starting the + * stream. + */ + markStreamStarted() { + for (const [type, typeState] of this.typeStates) { + for (const [authority, authorityMap] of typeState.subscribedResources) { + for (const resourceTimer of authorityMap.values()) { + resourceTimer.markAdsStreamStarted(); + } + } + } + } } +type LrsCall = ClientDuplexStream; + function localityEqual( loc1: Locality__Output, loc2: Locality__Output @@ -137,6 +522,7 @@ function localityEqual( } export interface XdsClusterDropStats { + addUncategorizedCallDropped(): void; addCallDropped(category: string): void; } @@ -145,26 +531,57 @@ export interface XdsClusterLocalityStats { addCallFinished(fail: boolean): void; } +interface DroppedRequests { + category: string; + dropped_count: number; +} + +interface UpstreamLocalityStats { + locality: Locality; + total_issued_requests: number; + total_successful_requests: number; + total_error_requests: number; + total_requests_in_progress: number; +} + +/** + * An interface representing the ClusterStats message type, restricted to the + * fields used in this module to ensure compatibility with both v2 and v3 APIs. + */ +interface ClusterStats { + cluster_name: string; + cluster_service_name: string; + dropped_requests: DroppedRequests[]; + total_dropped_requests: number; + upstream_locality_stats: UpstreamLocalityStats[]; + load_report_interval: Duration +} + interface ClusterLocalityStats { locality: Locality__Output; callsStarted: number; callsSucceeded: number; callsFailed: number; callsInProgress: number; + refcount: number; } interface ClusterLoadReport { callsDropped: Map; - localityStats: ClusterLocalityStats[]; + uncategorizedCallsDropped: number; + localityStats: Set; intervalStart: [number, number]; } +interface StatsMapEntry { + clusterName: string; + edsServiceName: string; + refCount: number; + stats: ClusterLoadReport; +} + class ClusterLoadReportMap { - private statsMap: { - clusterName: string; - edsServiceName: string; - stats: ClusterLoadReport; - }[] = []; + private statsMap: Set = new Set(); get( clusterName: string, @@ -181,23 +598,34 @@ class ClusterLoadReportMap { return undefined; } + /** + * Get the indicated map entry if it exists, or create a new one if it does + * not. 
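
/* Illustrative sketch (not part of the patch): the shape of the discovery
 * requests produced by updateNames above, assuming the DiscoveryRequest type and
 * the status import from this module. All field values are placeholders. */
const ackRequest: DiscoveryRequest = {
  node: null,                      // the node is only sent on the first request of a stream
  type_url: 'type.googleapis.com/envoy.config.cluster.v3.Cluster', // placeholder type
  resource_names: ['example_cluster'],
  response_nonce: 'nonce-1',
  version_info: '3',               // last accepted version for this resource type
  error_detail: null               // no error: this ACKs the response
};

const nackRequest: DiscoveryRequest = {
  ...ackRequest,
  version_info: '2',               // typically the last accepted version is retained
  error_detail: {
    code: status.UNAVAILABLE,
    message: 'xDS response validation errors: [...]'
  }
};
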
Increments the refcount of that entry, so a call to this method + * should correspond to a later call to unref + * @param clusterName + * @param edsServiceName + * @returns + */ getOrCreate(clusterName: string, edsServiceName: string): ClusterLoadReport { for (const statsObj of this.statsMap) { if ( statsObj.clusterName === clusterName && statsObj.edsServiceName === edsServiceName ) { + statsObj.refCount += 1; return statsObj.stats; } } const newStats: ClusterLoadReport = { callsDropped: new Map(), - localityStats: [], + uncategorizedCallsDropped: 0, + localityStats: new Set(), intervalStart: process.hrtime(), }; - this.statsMap.push({ + this.statsMap.add({ clusterName, edsServiceName, + refCount: 1, stats: newStats, }); return newStats; @@ -216,430 +644,100 @@ class ClusterLoadReportMap { ]; } } -} - -interface AdsState { - [EDS_TYPE_URL]: EdsState; - [CDS_TYPE_URL]: CdsState; - [RDS_TYPE_URL]: RdsState; - [LDS_TYPE_URL]: LdsState; -} - -/** - * Map type URLs to their corresponding message types - */ -type OutputType = T extends EdsTypeUrl - ? ClusterLoadAssignment__Output - : T extends CdsTypeUrl - ? Cluster__Output - : T extends RdsTypeUrl - ? RouteConfiguration__Output - : Listener__Output; - -function getResponseMessages( - typeUrl: T, - resources: Any__Output[] -): OutputType[] { - const result: OutputType[] = []; - for (const resource of resources) { - if (protoLoader.isAnyExtension(resource) && resource['@type'] === typeUrl) { - result.push(resource as protoLoader.AnyExtension & OutputType); - } else { - throw new Error( - `ADS Error: Invalid resource type ${ - protoLoader.isAnyExtension(resource) - ? resource['@type'] - : resource.type_url - }, expected ${typeUrl}` - ); - } - } - return result; -} - -export class XdsClient { - private adsNode: Node | null = null; - private adsClient: AggregatedDiscoveryServiceClient | null = null; - private adsCall: ClientDuplexStream< - DiscoveryRequest, - DiscoveryResponse__Output - > | null = null; - - private lrsNode: Node | null = null; - private lrsClient: LoadReportingServiceClient | null = null; - private lrsCall: ClientDuplexStream< - LoadStatsRequest, - LoadStatsResponse__Output - > | null = null; - private latestLrsSettings: LoadStatsResponse__Output | null = null; - - private clusterStatsMap: ClusterLoadReportMap = new ClusterLoadReportMap(); - private statsTimer: NodeJS.Timer; - - private hasShutdown = false; - - private adsState: AdsState; - - private adsBackoff: BackoffTimeout; - private lrsBackoff: BackoffTimeout; - - constructor() { - const edsState = new EdsState(() => { - this.updateNames(EDS_TYPE_URL); - }); - const cdsState = new CdsState(edsState, () => { - this.updateNames(CDS_TYPE_URL); - }); - const rdsState = new RdsState(() => { - this.updateNames(RDS_TYPE_URL); - }); - const ldsState = new LdsState(rdsState, () => { - this.updateNames(LDS_TYPE_URL); - }); - this.adsState = { - [EDS_TYPE_URL]: edsState, - [CDS_TYPE_URL]: cdsState, - [RDS_TYPE_URL]: rdsState, - [LDS_TYPE_URL]: ldsState, - }; - - const channelArgs = { - // 5 minutes - 'grpc.keepalive_time_ms': 5 * 60 * 1000 - } - - this.adsBackoff = new BackoffTimeout(() => { - this.maybeStartAdsStream(); - }); - this.adsBackoff.unref(); - this.lrsBackoff = new BackoffTimeout(() => { - this.maybeStartLrsStream(); - }); - this.lrsBackoff.unref(); - Promise.all([loadBootstrapInfo(), loadAdsProtos()]).then( - ([bootstrapInfo, protoDefinitions]) => { - if (this.hasShutdown) { - return; - } - const node: Node = { - ...bootstrapInfo.node, - build_version: `gRPC Node Pure JS 
${clientVersion}`, - user_agent_name: 'gRPC Node Pure JS', - }; - this.adsNode = { - ...node, - client_features: ['envoy.lb.does_not_support_overprovisioning'], - }; - this.lrsNode = { - ...node, - client_features: ['envoy.lrs.supports_send_all_clusters'], - }; - const credentialsConfigs = bootstrapInfo.xdsServers[0].channelCreds; - let channelCreds: ChannelCredentials | null = null; - for (const config of credentialsConfigs) { - if (config.type === 'google_default') { - channelCreds = createGoogleDefaultCredentials(); - break; - } else if (config.type === 'insecure') { - channelCreds = ChannelCredentials.createInsecure(); - break; - } - } - if (channelCreds === null) { - trace('Failed to initialize xDS Client. No valid credentials types found.'); - // Bubble this error up to any listeners - this.reportStreamError({ - code: status.INTERNAL, - details: 'Failed to initialize xDS Client. No valid credentials types found.', - metadata: new Metadata(), - }); - return; + unref(clusterName: string, edsServiceName: string) { + for (const statsObj of this.statsMap) { + if ( + statsObj.clusterName === clusterName && + statsObj.edsServiceName === edsServiceName + ) { + statsObj.refCount -=1; + if (statsObj.refCount === 0) { + this.statsMap.delete(statsObj); } - trace('Starting xDS client connected to server URI ' + bootstrapInfo.xdsServers[0].serverUri); - this.adsClient = new protoDefinitions.envoy.service.discovery.v2.AggregatedDiscoveryService( - bootstrapInfo.xdsServers[0].serverUri, - channelCreds, - channelArgs - ); - this.maybeStartAdsStream(); - - this.lrsClient = new protoDefinitions.envoy.service.load_stats.v2.LoadReportingService( - bootstrapInfo.xdsServers[0].serverUri, - channelCreds, - {channelOverride: this.adsClient.getChannel()} - ); - this.maybeStartLrsStream(); - }, - (error) => { - trace('Failed to initialize xDS Client. ' + error.message); - // Bubble this error up to any listeners - this.reportStreamError({ - code: status.INTERNAL, - details: `Failed to initialize xDS Client. ${error.message}`, - metadata: new Metadata(), - }); + return; } - ); - this.statsTimer = setInterval(() => {}, 0); - clearInterval(this.statsTimer); - } - - private handleAdsResponse(message: DiscoveryResponse__Output) { - let errorString: string | null; - /* The cases in this switch statement look redundant but separating them - * out like this is necessary for the typechecker to validate the types - * as narrowly as we need it to. */ - switch (message.type_url) { - case EDS_TYPE_URL: - errorString = this.adsState[message.type_url].handleResponses( - getResponseMessages(message.type_url, message.resources) - ); - break; - case CDS_TYPE_URL: - errorString = this.adsState[message.type_url].handleResponses( - getResponseMessages(message.type_url, message.resources) - ); - break; - case RDS_TYPE_URL: - errorString = this.adsState[message.type_url].handleResponses( - getResponseMessages(message.type_url, message.resources) - ); - break; - case LDS_TYPE_URL: - errorString = this.adsState[message.type_url].handleResponses( - getResponseMessages(message.type_url, message.resources) - ); - break; - default: - errorString = `Unknown type_url ${message.type_url}`; - } - if (errorString === null) { - trace('Acking message with type URL ' + message.type_url); - /* errorString can only be null in one of the first 4 cases, which - * implies that message.type_url is one of the 4 known type URLs, which - * means that this type assertion is valid. 
*/ - const typeUrl = message.type_url as AdsTypeUrl; - this.adsState[typeUrl].nonce = message.nonce; - this.adsState[typeUrl].versionInfo = message.version_info; - this.ack(typeUrl); - } else { - trace('Nacking message with type URL ' + message.type_url + ': "' + errorString + '"'); - this.nack(message.type_url, errorString); } } - /** - * Start the ADS stream if the client exists and there is not already an - * existing stream, and there - */ - private maybeStartAdsStream() { - if (this.adsClient === null) { - return; - } - if (this.adsCall !== null) { - return; - } - if (this.hasShutdown) { - return; - } - if (this.adsState[EDS_TYPE_URL].getResourceNames().length === 0 && - this.adsState[CDS_TYPE_URL].getResourceNames().length === 0 && - this.adsState[RDS_TYPE_URL].getResourceNames().length === 0 && - this.adsState[LDS_TYPE_URL].getResourceNames().length === 0) { - return; - } - trace('Starting ADS stream'); - // Backoff relative to when we start the request - this.adsBackoff.runOnce(); - this.adsCall = this.adsClient.StreamAggregatedResources(); - this.adsCall.on('data', (message: DiscoveryResponse__Output) => { - this.handleAdsResponse(message); - }); - this.adsCall.on('error', (error: ServiceError) => { - trace( - 'ADS stream ended. code=' + error.code + ' details= ' + error.details - ); - this.adsCall = null; - this.reportStreamError(error); - /* If the backoff timer is no longer running, we do not need to wait any - * more to start the new call. */ - if (!this.adsBackoff.isRunning()) { - this.maybeStartAdsStream(); - } - }); - - const allTypeUrls: AdsTypeUrl[] = [ - EDS_TYPE_URL, - CDS_TYPE_URL, - RDS_TYPE_URL, - LDS_TYPE_URL, - ]; - for (const typeUrl of allTypeUrls) { - const state = this.adsState[typeUrl]; - if (state.getResourceNames().length > 0) { - this.updateNames(typeUrl); - } - } + get size() { + return this.statsMap.size; } +} - /** - * Acknowledge an update. This should be called after the local nonce and - * version info are updated so that it sends the post-update values. - */ - ack(typeUrl: AdsTypeUrl) { - /* An ack is the best indication of a successful interaction between the - * client and the server, so we can reset the backoff timer here. */ - this.adsBackoff.stop(); - this.adsBackoff.reset(); - - this.updateNames(typeUrl); +class LrsCallState { + private statsTimer: NodeJS.Timer | null = null; + private sentInitialMessage = false; + constructor(private client: XdsSingleServerClient, private call: LrsCall, private node: Node) { + call.on('data', (message: LoadStatsResponse__Output) => { + this.handleResponseMessage(message); + }) + call.on('status', (status: StatusObject) => { + this.handleStreamStatus(status); + }); + call.on('error', () => {}); + this.sendStats(); } - /** - * Reject an update. This should be called without updating the local - * nonce and version info. 
- */ - private nack(typeUrl: string, message: string) { - let resourceNames: string[]; - let nonce: string; - let versionInfo: string; - switch (typeUrl) { - case EDS_TYPE_URL: - case CDS_TYPE_URL: - case RDS_TYPE_URL: - case LDS_TYPE_URL: - resourceNames = this.adsState[typeUrl].getResourceNames(); - nonce = this.adsState[typeUrl].nonce; - versionInfo = this.adsState[typeUrl].versionInfo; - break; - default: - resourceNames = []; - nonce = ''; - versionInfo = ''; - } - this.adsCall?.write({ - node: this.adsNode!, - type_url: typeUrl, - resource_names: resourceNames, - response_nonce: nonce, - version_info: versionInfo, - error_detail: { - message: message, - }, - }); + private handleStreamStatus(status: StatusObject) { + this.client.trace( + 'LRS stream ended. code=' + status.code + ' details= ' + status.details + ); + this.client.handleLrsStreamEnd(); } - private updateNames(typeUrl: AdsTypeUrl) { - if (this.adsState[EDS_TYPE_URL].getResourceNames().length === 0 && - this.adsState[CDS_TYPE_URL].getResourceNames().length === 0 && - this.adsState[RDS_TYPE_URL].getResourceNames().length === 0 && - this.adsState[LDS_TYPE_URL].getResourceNames().length === 0) { - this.adsCall?.end(); - this.lrsCall?.end(); - return; + private handleResponseMessage(message: LoadStatsResponse__Output) { + this.client.trace('Received LRS response'); + this.client.onLrsStreamReceivedMessage(); + if ( + !this.statsTimer || + message.load_reporting_interval?.seconds !== + this.client.latestLrsSettings?.load_reporting_interval?.seconds || + message.load_reporting_interval?.nanos !== + this.client.latestLrsSettings?.load_reporting_interval?.nanos + ) { + /* Only reset the timer if the interval has changed or was not set + * before. */ + if (this.statsTimer) { + clearInterval(this.statsTimer); + } + /* Convert a google.protobuf.Duration to a number of milliseconds for + * use with setInterval. */ + const loadReportingIntervalMs = + Number.parseInt(message.load_reporting_interval!.seconds) * 1000 + + message.load_reporting_interval!.nanos / 1_000_000; + this.client.trace('Received LRS response with load reporting interval ' + loadReportingIntervalMs + ' ms'); + this.statsTimer = setInterval(() => { + this.sendStats(); + }, loadReportingIntervalMs); } - this.maybeStartAdsStream(); - this.maybeStartLrsStream(); - trace('Sending update for type URL ' + typeUrl + ' with names ' + this.adsState[typeUrl].getResourceNames()); - this.adsCall?.write({ - node: this.adsNode!, - type_url: typeUrl, - resource_names: this.adsState[typeUrl].getResourceNames(), - response_nonce: this.adsState[typeUrl].nonce, - version_info: this.adsState[typeUrl].versionInfo, - }); + this.client.latestLrsSettings = message; } - private reportStreamError(status: StatusObject) { - this.adsState[EDS_TYPE_URL].reportStreamError(status); - this.adsState[CDS_TYPE_URL].reportStreamError(status); - this.adsState[RDS_TYPE_URL].reportStreamError(status); - this.adsState[LDS_TYPE_URL].reportStreamError(status); + private sendLrsMessage(clusterStats: ClusterStats[]) { + const request: LoadStatsRequest = { + node: this.sentInitialMessage ? 
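
/* Illustrative sketch (not part of the patch): converting the LRS
 * load_reporting_interval Duration (seconds as a string, nanos as a number, per
 * the generated __Output types) into milliseconds for setInterval, mirroring the
 * calculation above. */
function loadReportingIntervalToMs(interval: { seconds: string; nanos: number }): number {
  return Number.parseInt(interval.seconds) * 1000 + interval.nanos / 1_000_000;
}

// e.g. loadReportingIntervalToMs({ seconds: '10', nanos: 500_000_000 }) === 10500
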
null : this.node, + cluster_stats: clusterStats + }; + this.client.trace('Sending LRS message ' + JSON.stringify(request, undefined, 2)); + this.call.write(request); + this.sentInitialMessage = true; } - private maybeStartLrsStream() { - if (!this.lrsClient) { - return; - } - if (this.lrsCall) { - return; - } - if (this.hasShutdown) { - return; - } - if (this.adsState[EDS_TYPE_URL].getResourceNames().length === 0 && - this.adsState[CDS_TYPE_URL].getResourceNames().length === 0 && - this.adsState[RDS_TYPE_URL].getResourceNames().length === 0 && - this.adsState[LDS_TYPE_URL].getResourceNames().length === 0) { - return; - } - - trace('Starting LRS stream'); - - this.lrsBackoff.runOnce(); - this.lrsCall = this.lrsClient.streamLoadStats(); - let receivedSettingsForThisStream = false; - this.lrsCall.on('data', (message: LoadStatsResponse__Output) => { - /* Once we get any response from the server, we assume that the stream is - * in a good state, so we can reset the backoff timer. */ - this.lrsBackoff.stop(); - this.lrsBackoff.reset(); - if ( - !receivedSettingsForThisStream || - message.load_reporting_interval?.seconds !== - this.latestLrsSettings?.load_reporting_interval?.seconds || - message.load_reporting_interval?.nanos !== - this.latestLrsSettings?.load_reporting_interval?.nanos - ) { - /* Only reset the timer if the interval has changed or was not set - * before. */ - clearInterval(this.statsTimer); - /* Convert a google.protobuf.Duration to a number of milliseconds for - * use with setInterval. */ - const loadReportingIntervalMs = - Number.parseInt(message.load_reporting_interval!.seconds) * 1000 + - message.load_reporting_interval!.nanos / 1_000_000; - trace('Received LRS request with load reporting interval ' + loadReportingIntervalMs + ' ms'); - this.statsTimer = setInterval(() => { - this.sendStats(); - }, loadReportingIntervalMs); - } - this.latestLrsSettings = message; - receivedSettingsForThisStream = true; - }); - this.lrsCall.on('error', (error: ServiceError) => { - trace( - 'LRS stream ended. code=' + error.code + ' details= ' + error.details - ); - this.lrsCall = null; - clearInterval(this.statsTimer); - /* If the backoff timer is no longer running, we do not need to wait any - * more to start the new call. */ - if (!this.lrsBackoff.isRunning()) { - this.maybeStartLrsStream(); - } - }); - /* Send buffered stats information when starting LRS stream. If there is no - * buffered stats information, it will still send the node field. 
*/ - this.sendStats(); + private get latestLrsSettings() { + return this.client.latestLrsSettings; } private sendStats() { - if (!this.lrsCall) { - return; - } if (!this.latestLrsSettings) { - this.lrsCall.write({ - node: this.lrsNode!, - }); + this.sendLrsMessage([]); return; } const clusterStats: ClusterStats[] = []; for (const [ { clusterName, edsServiceName }, stats, - ] of this.clusterStatsMap.entries()) { + ] of this.client.clusterStatsMap.entries()) { if ( this.latestLrsSettings.send_all_clusters || this.latestLrsSettings.clusters.indexOf(clusterName) > 0 @@ -664,7 +762,7 @@ export class XdsClient { localityStats.callsFailed = 0; } } - const droppedRequests: _envoy_api_v2_endpoint_ClusterStats_DroppedRequests[] = []; + const droppedRequests: DroppedRequests[] = []; let totalDroppedRequests = 0; for (const [category, count] of stats.callsDropped.entries()) { if (count > 0) { @@ -675,8 +773,10 @@ export class XdsClient { totalDroppedRequests += count; } } + totalDroppedRequests += stats.uncategorizedCallsDropped; // Clear out dropped call stats after sending them stats.callsDropped.clear(); + stats.uncategorizedCallsDropped = 0; const interval = process.hrtime(stats.intervalStart); stats.intervalStart = process.hrtime(); // Skip clusters with 0 requests @@ -695,83 +795,201 @@ export class XdsClient { } } } - trace('Sending LRS stats ' + JSON.stringify(clusterStats, undefined, 2)); - this.lrsCall.write({ - node: this.lrsNode!, - cluster_stats: clusterStats, + this.sendLrsMessage(clusterStats); + + } +} + +class XdsSingleServerClient { + public ignoreResourceDeletion: boolean; + + private adsBackoff: BackoffTimeout; + private lrsBackoff: BackoffTimeout; + + private adsClient: AggregatedDiscoveryServiceClient; + private adsCallState: AdsCallState | null = null; + + private lrsClient: LoadReportingServiceClient; + private lrsCallState: LrsCallState | null = null; + public clusterStatsMap = new ClusterLoadReportMap(); + public latestLrsSettings: LoadStatsResponse__Output | null = null; + + /** + * The number of authorities that are using this client. Streams should only + * be started if refcount > 0 + */ + private refcount = 0; + + /** + * Map of type to latest accepted version string for that type + */ + public resourceTypeVersionMap: Map = new Map(); + constructor(public xdsClient: XdsClient, bootstrapNode: Node, public xdsServerConfig: XdsServerConfig) { + this.adsBackoff = new BackoffTimeout(() => { + this.maybeStartAdsStream(); + }); + this.adsBackoff.unref(); + this.lrsBackoff = new BackoffTimeout(() => { + this.maybeStartLrsStream(); + }); + this.lrsBackoff.unref(); + this.ignoreResourceDeletion = xdsServerConfig.server_features.includes('ignore_resource_deletion'); + const channelArgs = { + // 5 minutes + 'grpc.keepalive_time_ms': 5 * 60 * 1000 + } + const credentialsConfigs = xdsServerConfig.channel_creds; + let channelCreds: ChannelCredentials | null = null; + for (const config of credentialsConfigs) { + if (config.type === 'google_default') { + channelCreds = createGoogleDefaultCredentials(); + break; + } else if (config.type === 'insecure') { + channelCreds = ChannelCredentials.createInsecure(); + break; + } + } + const serverUri = this.xdsServerConfig.server_uri + this.trace('Starting xDS client connected to server URI ' + this.xdsServerConfig.server_uri); + /* Bootstrap validation rules guarantee that a matching channel credentials + * config exists in the list. 
*/ + const channel = new Channel(serverUri, channelCreds!, channelArgs); + const protoDefinitions = loadAdsProtos(); + this.adsClient = new protoDefinitions.envoy.service.discovery.v3.AggregatedDiscoveryService( + serverUri, + channelCreds!, + {channelOverride: channel} + ); + channel.watchConnectivityState(channel.getConnectivityState(false), Infinity, () => { + this.handleAdsConnectivityStateUpdate(); + }); + this.lrsClient = new protoDefinitions.envoy.service.load_stats.v3.LoadReportingService( + serverUri, + channelCreds!, + {channelOverride: channel} + ); + } + + private handleAdsConnectivityStateUpdate() { + const state = this.adsClient.getChannel().getConnectivityState(false); + if (state === connectivityState.READY) { + this.adsCallState?.markStreamStarted(); + } + if (state === connectivityState.TRANSIENT_FAILURE) { + for (const authorityState of this.xdsClient.authorityStateMap.values()) { + if (authorityState.client !== this) { + continue; + } + for (const typeMap of authorityState.resourceMap.values()) { + for (const resourceState of typeMap.values()) { + for (const watcher of resourceState.watchers) { + watcher.onError({ + code: status.UNAVAILABLE, + details: 'No connection established to xDS server', + metadata: new Metadata() + }); + } + } + } + } + } + this.adsClient.getChannel().watchConnectivityState(state, Infinity, () => { + this.handleAdsConnectivityStateUpdate(); }); } - addEndpointWatcher( - edsServiceName: string, - watcher: Watcher - ) { - trace('Watcher added for endpoint ' + edsServiceName); - this.adsState[EDS_TYPE_URL].addWatcher(edsServiceName, watcher); + onAdsStreamReceivedMessage() { + this.adsBackoff.stop(); + this.adsBackoff.reset(); } - removeEndpointWatcher( - edsServiceName: string, - watcher: Watcher - ) { - trace('Watcher removed for endpoint ' + edsServiceName); - this.adsState[EDS_TYPE_URL].removeWatcher(edsServiceName, watcher); + handleAdsStreamEnd() { + this.adsCallState = null; + /* The backoff timer would start the stream when it finishes. If it is not + * running, restart the stream immediately. */ + if (!this.adsBackoff.isRunning()) { + this.maybeStartAdsStream(); + } } - addClusterWatcher(clusterName: string, watcher: Watcher) { - trace('Watcher added for cluster ' + clusterName); - this.adsState[CDS_TYPE_URL].addWatcher(clusterName, watcher); + private maybeStartAdsStream() { + if (this.adsCallState || this.refcount < 1) { + return; + } + this.trace('Starting ADS stream'); + const metadata = new Metadata({waitForReady: true}); + const call = this.adsClient.StreamAggregatedResources(metadata); + this.adsCallState = new AdsCallState(this, call, this.xdsClient.adsNode!); + this.adsBackoff.runOnce(); } - removeClusterWatcher(clusterName: string, watcher: Watcher) { - trace('Watcher removed for cluster ' + clusterName); - this.adsState[CDS_TYPE_URL].removeWatcher(clusterName, watcher); + onLrsStreamReceivedMessage() { + this.lrsBackoff.stop(); + this.lrsBackoff.reset(); } - addRouteWatcher(routeConfigName: string, watcher: Watcher) { - trace('Watcher added for route ' + routeConfigName); - this.adsState[RDS_TYPE_URL].addWatcher(routeConfigName, watcher); + handleLrsStreamEnd() { + this.lrsCallState = null; + /* The backoff timer would start the stream when it finishes. If it is not + * running, restart the stream immediately. 
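
/* Illustrative sketch (not part of the patch): the recursive
 * watchConnectivityState pattern used above to observe every channel state
 * transition. The channel argument is a placeholder for an existing
 * @grpc/grpc-js Channel instance. */
import { Channel, connectivityState } from '@grpc/grpc-js';

function watchForever(channel: Channel, onStateChange: (state: connectivityState) => void) {
  const currentState = channel.getConnectivityState(false);
  // The callback fires once when the state changes away from currentState;
  // re-register from the fresh state to keep watching indefinitely.
  channel.watchConnectivityState(currentState, Infinity, () => {
    onStateChange(channel.getConnectivityState(false));
    watchForever(channel, onStateChange);
  });
}
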
*/ + if (!this.lrsBackoff.isRunning()) { + this.maybeStartLrsStream(); + } } - removeRouteWatcher(routeConfigName: string, watcher: Watcher) { - trace('Watcher removed for route ' + routeConfigName); - this.adsState[RDS_TYPE_URL].removeWatcher(routeConfigName, watcher); + private maybeStartLrsStream() { + if (this.lrsCallState || this.refcount < 1 || this.clusterStatsMap.size < 1) { + return; + } + this.trace('Starting LRS stream'); + const metadata = new Metadata({waitForReady: true}); + const call = this.lrsClient.StreamLoadStats(metadata); + this.lrsCallState = new LrsCallState(this, call, this.xdsClient.lrsNode!); + this.lrsBackoff.runOnce(); } - addListenerWatcher(targetName: string, watcher: Watcher) { - trace('Watcher added for listener ' + targetName); - this.adsState[LDS_TYPE_URL].addWatcher(targetName, watcher); + trace(text: string) { + trace(this.xdsServerConfig.server_uri + ' ' + text); } - removeListenerWatcher(targetName: string, watcher: Watcher) { - trace('Watcher removed for listener ' + targetName); - this.adsState[LDS_TYPE_URL].removeWatcher(targetName, watcher); + subscribe(type: XdsResourceType, name: XdsResourceName) { + this.trace('subscribe(type=' + type.getTypeUrl() + ', name=' + xdsResourceNameToString(name, type.getTypeUrl()) + ')'); + this.trace(JSON.stringify(name)); + this.maybeStartAdsStream(); + this.adsCallState?.subscribe(type, name); + } + + unsubscribe(type: XdsResourceType, name: XdsResourceName) { + this.trace('unsubscribe(type=' + type.getTypeUrl() + ', name=' + xdsResourceNameToString(name, type.getTypeUrl()) + ')'); + this.adsCallState?.unsubscribe(type, name); + if (this.adsCallState && !this.adsCallState.hasSubscribedResources()) { + this.adsCallState.end(); + this.adsCallState = null; + } + } + + ref() { + this.refcount += 1; + } + + unref() { + this.refcount -= 1; } - /** - * - * @param lrsServer The target name of the server to send stats to. An empty - * string indicates that the default LRS client should be used. Currently - * only the empty string is supported here. - * @param clusterName - * @param edsServiceName - */ addClusterDropStats( - lrsServer: string, clusterName: string, edsServiceName: string ): XdsClusterDropStats { - trace('addClusterDropStats(lrsServer=' + lrsServer + ', clusterName=' + clusterName + ', edsServiceName=' + edsServiceName + ')'); - if (lrsServer !== '') { - return { - addCallDropped: (category) => {}, - }; - } + this.trace('addClusterDropStats(clusterName=' + clusterName + ', edsServiceName=' + edsServiceName + ')'); const clusterStats = this.clusterStatsMap.getOrCreate( clusterName, edsServiceName ); + this.maybeStartLrsStream(); return { + addUncategorizedCallDropped: () => { + clusterStats.uncategorizedCallsDropped += 1; + }, addCallDropped: (category) => { const prevCount = clusterStats.callsDropped.get(category) ?? 
0; clusterStats.callsDropped.set(category, prevCount + 1); @@ -779,23 +997,22 @@ export class XdsClient { }; } + removeClusterDropStats(clusterName: string, edsServiceName: string) { + this.trace('removeClusterDropStats(clusterName=' + clusterName + ', edsServiceName=' + edsServiceName + ')'); + this.clusterStatsMap.unref(clusterName, edsServiceName); + } + addClusterLocalityStats( - lrsServer: string, clusterName: string, edsServiceName: string, locality: Locality__Output ): XdsClusterLocalityStats { - trace('addClusterLocalityStats(lrsServer=' + lrsServer + ', clusterName=' + clusterName + ', edsServiceName=' + edsServiceName + ', locality=' + JSON.stringify(locality) + ')'); - if (lrsServer !== '') { - return { - addCallStarted: () => {}, - addCallFinished: (fail) => {}, - }; - } + this.trace('addClusterLocalityStats(clusterName=' + clusterName + ', edsServiceName=' + edsServiceName + ', locality=' + JSON.stringify(locality) + ')'); const clusterStats = this.clusterStatsMap.getOrCreate( clusterName, edsServiceName ); + this.maybeStartLrsStream(); let localityStats: ClusterLocalityStats | null = null; for (const statsObj of clusterStats.localityStats) { if (localityEqual(locality, statsObj.locality)) { @@ -810,8 +1027,9 @@ export class XdsClient { callsStarted: 0, callsSucceeded: 0, callsFailed: 0, + refcount: 0, }; - clusterStats.localityStats.push(localityStats); + clusterStats.localityStats.add(localityStats); } /* Help the compiler understand that this object is always non-null in the * closure */ @@ -832,12 +1050,249 @@ export class XdsClient { }; } - private shutdown(): void { - this.adsCall?.cancel(); - this.adsClient?.close(); - this.lrsCall?.cancel(); - this.lrsClient?.close(); - this.hasShutdown = true; + removeClusterLocalityStats( + clusterName: string, + edsServiceName: string, + locality: Locality__Output + ) { + this.trace('removeClusterLocalityStats(clusterName=' + clusterName + ', edsServiceName=' + edsServiceName + ', locality=' + JSON.stringify(locality) + ')'); + const clusterStats = this.clusterStatsMap.get(clusterName, edsServiceName); + if (!clusterStats) { + return; + } + for (const statsObj of clusterStats.localityStats) { + if (localityEqual(locality, statsObj.locality)) { + statsObj.refcount -= 1; + if (statsObj.refcount === 0) { + clusterStats.localityStats.delete(statsObj); + } + break; + } + } + this.clusterStatsMap.unref(clusterName, edsServiceName); + } +} + +interface ClientMapEntry { + serverConfig: XdsServerConfig; + client: XdsSingleServerClient; +} + +type ClientResourceStatus = 'REQUESTED' | 'DOES_NOT_EXIST' | 'ACKED' | 'NACKED'; + +interface ResourceMetadata { + clientStatus: ClientResourceStatus; + rawResource?: Any__Output; + updateTime?: Date; + version?: string; + failedVersion?: string; + failedDetails?: string; + failedUpdateTime?: Date; +} + +interface ResourceState { + watchers: Set; + cachedResource: object | null; + meta: ResourceMetadata; + deletionIgnored: boolean; +} + +interface AuthorityState { + client: XdsSingleServerClient; + /** + * type -> key -> state + */ + resourceMap: Map>; +} + +const userAgentName = 'gRPC Node Pure JS'; + +export class XdsClient { + /** + * authority -> authority state + */ + public authorityStateMap: Map = new Map(); + private clients: ClientMapEntry[] = []; + private typeRegistry: Map = new Map(); + private bootstrapInfo: BootstrapInfo | null = null; + + constructor(bootstrapInfoOverride?: BootstrapInfo) { + if (bootstrapInfoOverride) { + this.bootstrapInfo = bootstrapInfoOverride; + } + 
registerXdsClientWithCsds(this); + } + + private getBootstrapInfo() { + if (!this.bootstrapInfo) { + this.bootstrapInfo = loadBootstrapInfo(); + } + return this.bootstrapInfo; + } + + get adsNode(): Node | undefined { + if (!this.bootstrapInfo) { + return undefined; + } + return { + ...this.bootstrapInfo.node, + user_agent_name: userAgentName, + user_agent_version: clientVersion, + client_features: ['envoy.lb.does_not_support_overprovisioning'], + } + } + + get lrsNode(): Node | undefined { + if (!this.bootstrapInfo) { + return undefined; + } + return { + ...this.bootstrapInfo.node, + user_agent_name: userAgentName, + user_agent_version: clientVersion, + client_features: ['envoy.lrs.supports_send_all_clusters'], + }; + } + + private getOrCreateClient(authority: string): XdsSingleServerClient { + const bootstrapInfo = this.getBootstrapInfo(); + let serverConfig: XdsServerConfig; + if (authority === 'old:') { + serverConfig = bootstrapInfo.xdsServers[0]; + } else { + if (authority in bootstrapInfo.authorities) { + serverConfig = bootstrapInfo.authorities[authority].xdsServers?.[0] ?? bootstrapInfo.xdsServers[0]; + } else { + throw new Error(`Authority ${authority} not found in bootstrap authorities list`); + } + } + for (const entry of this.clients) { + if (serverConfigEqual(serverConfig, entry.serverConfig)) { + return entry.client; + } + } + const client = new XdsSingleServerClient(this, bootstrapInfo.node, serverConfig); + this.clients.push({client, serverConfig}); + return client; + } + + private getClient(server: XdsServerConfig) { + for (const entry of this.clients) { + if (serverConfigEqual(server, entry.serverConfig)) { + return entry.client; + } + } + return undefined; + } + + getResourceType(typeUrl: string) { + return this.typeRegistry.get(typeUrl); + } + + watchResource(type: XdsResourceType, name: string, watcher: ResourceWatcherInterface) { + trace('watchResource(type=' + type.getTypeUrl() + ', name=' + name + ')'); + if (this.typeRegistry.has(type.getTypeUrl())) { + if (this.typeRegistry.get(type.getTypeUrl()) !== type) { + throw new Error(`Resource type does not match previously used type with the same type URL: ${type.getTypeUrl()}`); + } + } else { + this.typeRegistry.set(type.getTypeUrl(), type); + this.typeRegistry.set(type.getFullTypeUrl(), type); + } + const resourceName = parseXdsResourceName(name, type.getTypeUrl()); + let authorityState = this.authorityStateMap.get(resourceName.authority); + if (!authorityState) { + authorityState = { + client: this.getOrCreateClient(resourceName.authority), + resourceMap: new Map() + }; + authorityState.client.ref(); + this.authorityStateMap.set(resourceName.authority, authorityState); + } + let keyMap = authorityState.resourceMap.get(type); + if (!keyMap) { + keyMap = new Map(); + authorityState.resourceMap.set(type, keyMap); + } + let entry = keyMap.get(resourceName.key); + let isNewSubscription = false; + if (!entry) { + isNewSubscription = true; + entry = { + watchers: new Set(), + cachedResource: null, + deletionIgnored: false, + meta: { + clientStatus: 'REQUESTED' + } + }; + keyMap.set(resourceName.key, entry); + } + entry.watchers.add(watcher); + if (entry.cachedResource) { + process.nextTick(() => { + if (entry?.cachedResource) { + watcher.onGenericResourceChanged(entry.cachedResource); + } + }); + } + if (isNewSubscription) { + authorityState.client.subscribe(type, resourceName); + } + } + + cancelResourceWatch(type: XdsResourceType, name: string, watcher: ResourceWatcherInterface) { + trace('cancelResourceWatch(type=' + 
type.getTypeUrl() + ', name=' + name + ')'); + const resourceName = parseXdsResourceName(name, type.getTypeUrl()); + const authorityState = this.authorityStateMap.get(resourceName.authority); + if (!authorityState) { + return; + } + const entry = authorityState.resourceMap.get(type)?.get(resourceName.key); + if (entry) { + entry.watchers.delete(watcher); + if (entry.watchers.size === 0) { + authorityState.resourceMap.get(type)!.delete(resourceName.key); + authorityState.client.unsubscribe(type, resourceName); + if (authorityState.resourceMap.get(type)!.size === 0) { + authorityState.resourceMap.delete(type); + if (authorityState.resourceMap.size === 0) { + authorityState.client.unref(); + this.authorityStateMap.delete(resourceName.authority); + } + } + } + } + } + + addClusterDropStats(lrsServer: XdsServerConfig, clusterName: string, edsServiceName: string): XdsClusterDropStats { + const client = this.getClient(lrsServer); + if (!client) { + return { + addUncategorizedCallDropped: () => {}, + addCallDropped: (category) => {}, + }; + } + return client.addClusterDropStats(clusterName, edsServiceName); + } + + removeClusterDropStats(lrsServer: XdsServerConfig, clusterName: string, edsServiceName: string) { + this.getClient(lrsServer)?.removeClusterDropStats(clusterName, edsServiceName); + } + + addClusterLocalityStats(lrsServer: XdsServerConfig, clusterName: string, edsServiceName: string, locality: Locality__Output): XdsClusterLocalityStats { + const client = this.getClient(lrsServer); + if (!client) { + return { + addCallStarted: () => {}, + addCallFinished: (fail) => {}, + }; + } + return client.addClusterLocalityStats(clusterName, edsServiceName, locality); + } + + removeClusterLocalityStats(lrsServer: XdsServerConfig, clusterName: string, edsServiceName: string, locality: Locality__Output) { + this.getClient(lrsServer)?.removeClusterLocalityStats(clusterName, edsServiceName, locality); } } @@ -848,4 +1303,4 @@ export function getSingletonXdsClient(): XdsClient { singletonXdsClient = new XdsClient(); } return singletonXdsClient; -} \ No newline at end of file +} diff --git a/packages/grpc-js-xds/src/xds-resource-type/cluster-resource-type.ts b/packages/grpc-js-xds/src/xds-resource-type/cluster-resource-type.ts new file mode 100644 index 000000000..c4081baa8 --- /dev/null +++ b/packages/grpc-js-xds/src/xds-resource-type/cluster-resource-type.ts @@ -0,0 +1,311 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
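/* Illustrative sketch, not part of the change itself: subscribing to a cluster resource
 * through the generic watchResource/cancelResourceWatch API defined in xds-client.ts
 * above. The watcher is hypothetical and deliberately partial: only
 * onGenericResourceChanged is shown being invoked in this hunk, so the remaining
 * ResourceWatcherInterface callbacks are omitted and the object is cast for illustration.
 * ClusterResourceType is defined in the new cluster-resource-type.ts introduced here. */
function exampleWatchCluster() {
  const client = getSingletonXdsClient();
  const watcher = {
    onGenericResourceChanged(resource: object) {
      console.log('Received validated Cluster resource: ' + JSON.stringify(resource));
    },
  } as unknown as ResourceWatcherInterface;
  client.watchResource(ClusterResourceType.get(), 'example_cluster', watcher);
  // Later, when the consuming configuration goes away:
  client.cancelResourceWatch(ClusterResourceType.get(), 'example_cluster', watcher);
}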
+ * + */ + +import { CDS_TYPE_URL, CLUSTER_CONFIG_TYPE_URL, decodeSingleResource } from "../resources"; +import { XdsDecodeContext, XdsDecodeResult, XdsResourceType } from "./xds-resource-type"; +import { LoadBalancingConfig, experimental, logVerbosity } from "@grpc/grpc-js"; +import { XdsServerConfig } from "../xds-bootstrap"; +import { Duration__Output } from "../generated/google/protobuf/Duration"; +import { OutlierDetection__Output } from "../generated/envoy/config/cluster/v3/OutlierDetection"; +import { EXPERIMENTAL_CUSTOM_LB_CONFIG, EXPERIMENTAL_OUTLIER_DETECTION, EXPERIMENTAL_RING_HASH } from "../environment"; +import { Cluster__Output } from "../generated/envoy/config/cluster/v3/Cluster"; +import { UInt32Value__Output } from "../generated/google/protobuf/UInt32Value"; +import { Any__Output } from "../generated/google/protobuf/Any"; +import { Watcher, XdsClient } from "../xds-client"; +import { protoDurationToDuration } from "../duration"; +import { convertToLoadBalancingConfig } from "../lb-policy-registry"; +import SuccessRateEjectionConfig = experimental.SuccessRateEjectionConfig; +import FailurePercentageEjectionConfig = experimental.FailurePercentageEjectionConfig; +import parseLoadBalancingConfig = experimental.parseLoadBalancingConfig; + +const TRACER_NAME = 'xds_client'; + +function trace(text: string): void { + experimental.trace(logVerbosity.DEBUG, TRACER_NAME, text); +} + + +export interface CdsUpdate { + type: 'AGGREGATE' | 'EDS' | 'LOGICAL_DNS'; + name: string; + aggregateChildren: string[]; + lrsLoadReportingServer?: XdsServerConfig; + maxConcurrentRequests?: number; + edsServiceName?: string; + dnsHostname?: string; + lbPolicyConfig: LoadBalancingConfig[]; + outlierDetectionUpdate?: experimental.OutlierDetectionRawConfig; +} + +function convertOutlierDetectionUpdate(outlierDetection: OutlierDetection__Output | null): experimental.OutlierDetectionRawConfig | undefined { + if (!EXPERIMENTAL_OUTLIER_DETECTION) { + return undefined; + } + if (!outlierDetection) { + /* No-op outlier detection config, with all fields unset. */ + return { + child_policy: [] + }; + } + let successRateConfig: Partial | undefined = undefined; + /* Success rate ejection is enabled by default, so we only disable it if + * enforcing_success_rate is set and it has the value 0 */ + if (!outlierDetection.enforcing_success_rate || outlierDetection.enforcing_success_rate.value > 0) { + successRateConfig = { + enforcement_percentage: outlierDetection.enforcing_success_rate?.value, + minimum_hosts: outlierDetection.success_rate_minimum_hosts?.value, + request_volume: outlierDetection.success_rate_request_volume?.value, + stdev_factor: outlierDetection.success_rate_stdev_factor?.value + }; + } + let failurePercentageConfig: Partial | undefined = undefined; + /* Failure percentage ejection is disabled by default, so we only enable it + * if enforcing_failure_percentage is set and it has a value greater than 0 */ + if (outlierDetection.enforcing_failure_percentage && outlierDetection.enforcing_failure_percentage.value > 0) { + failurePercentageConfig = { + enforcement_percentage: outlierDetection.enforcing_failure_percentage.value, + minimum_hosts: outlierDetection.failure_percentage_minimum_hosts?.value, + request_volume: outlierDetection.failure_percentage_request_volume?.value, + threshold: outlierDetection.failure_percentage_threshold?.value + } + } + return { + interval: outlierDetection.interval ? 
protoDurationToDuration(outlierDetection.interval) : undefined, + base_ejection_time: outlierDetection.base_ejection_time ? protoDurationToDuration(outlierDetection.base_ejection_time) : undefined, + max_ejection_time: outlierDetection.max_ejection_time ? protoDurationToDuration(outlierDetection.max_ejection_time) : undefined, + max_ejection_percent: outlierDetection.max_ejection_percent?.value, + success_rate_ejection: successRateConfig, + failure_percentage_ejection: failurePercentageConfig, + child_policy: [] + }; +} + +export class ClusterResourceType extends XdsResourceType { + private static singleton: ClusterResourceType = new ClusterResourceType(); + + private constructor() { + super(); + } + + static get() { + return ClusterResourceType.singleton; + } + + getTypeUrl(): string { + return 'envoy.config.cluster.v3.Cluster'; + } + + private validateNonnegativeDuration(duration: Duration__Output | null): boolean { + if (!duration) { + return true; + } + /* The maximum values here come from the official Protobuf documentation: + * https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Duration + */ + return Number(duration.seconds) >= 0 && + Number(duration.seconds) <= 315_576_000_000 && + duration.nanos >= 0 && + duration.nanos <= 999_999_999; + } + + private validatePercentage(percentage: UInt32Value__Output | null): boolean { + if (!percentage) { + return true; + } + return percentage.value >=0 && percentage.value <= 100; + } + + private validateResource(context: XdsDecodeContext, message: Cluster__Output): CdsUpdate | null { + let lbPolicyConfig: LoadBalancingConfig; + if (EXPERIMENTAL_CUSTOM_LB_CONFIG && message.load_balancing_policy) { + try { + lbPolicyConfig = convertToLoadBalancingConfig(message.load_balancing_policy); + } catch (e) { + trace('LB policy config parsing failed with error ' + e); + return null; + } + try { + parseLoadBalancingConfig(lbPolicyConfig); + } catch (e) { + trace('LB policy config parsing failed with error ' + e); + return null; + } + } else if (message.lb_policy === 'ROUND_ROBIN') { + lbPolicyConfig = { + xds_wrr_locality: { + child_policy: [{round_robin: {}}] + } + }; + } else if(EXPERIMENTAL_RING_HASH && message.lb_policy === 'RING_HASH') { + if (message.ring_hash_lb_config && message.ring_hash_lb_config.hash_function !== 'XX_HASH') { + return null; + } + const minRingSize = message.ring_hash_lb_config?.minimum_ring_size ? Number(message.ring_hash_lb_config.minimum_ring_size.value) : 1024; + if (minRingSize > 8_388_608) { + return null; + } + const maxRingSize = message.ring_hash_lb_config?.maximum_ring_size ? 
Number(message.ring_hash_lb_config.maximum_ring_size.value) : 8_388_608; + if (maxRingSize > 8_388_608) { + return null; + } + lbPolicyConfig = { + ring_hash: { + min_ring_size: minRingSize, + max_ring_size: maxRingSize + } + }; + } else { + return null; + } + if (message.lrs_server) { + if (!message.lrs_server.self) { + return null; + } + } + if (EXPERIMENTAL_OUTLIER_DETECTION) { + if (message.outlier_detection) { + if (!this.validateNonnegativeDuration(message.outlier_detection.interval)) { + return null; + } + if (!this.validateNonnegativeDuration(message.outlier_detection.base_ejection_time)) { + return null; + } + if (!this.validateNonnegativeDuration(message.outlier_detection.max_ejection_time)) { + return null; + } + if (!this.validatePercentage(message.outlier_detection.max_ejection_percent)) { + return null; + } + if (!this.validatePercentage(message.outlier_detection.enforcing_success_rate)) { + return null; + } + if (!this.validatePercentage(message.outlier_detection.failure_percentage_threshold)) { + return null; + } + if (!this.validatePercentage(message.outlier_detection.enforcing_failure_percentage)) { + return null; + } + } + } + if (message.cluster_discovery_type === 'cluster_type') { + if (!(message.cluster_type?.typed_config && message.cluster_type.typed_config.type_url === CLUSTER_CONFIG_TYPE_URL)) { + return null; + } + const clusterConfig = decodeSingleResource(CLUSTER_CONFIG_TYPE_URL, message.cluster_type.typed_config.value); + if (clusterConfig.clusters.length === 0) { + return null; + } + return { + type: 'AGGREGATE', + name: message.name, + aggregateChildren: clusterConfig.clusters, + outlierDetectionUpdate: convertOutlierDetectionUpdate(null), + lbPolicyConfig: [lbPolicyConfig] + }; + } else { + let maxConcurrentRequests: number | undefined = undefined; + for (const threshold of message.circuit_breakers?.thresholds ?? []) { + if (threshold.priority === 'DEFAULT') { + maxConcurrentRequests = threshold.max_requests?.value; + } + } + if (message.type === 'EDS') { + if (!message.eds_cluster_config?.eds_config?.ads && !message.eds_cluster_config?.eds_config?.self) { + return null; + } + if (message.name.startsWith('xdstp:') && message.eds_cluster_config.service_name === '') { + return null; + } + return { + type: 'EDS', + name: message.name, + aggregateChildren: [], + maxConcurrentRequests: maxConcurrentRequests, + edsServiceName: message.eds_cluster_config.service_name === '' ? undefined : message.eds_cluster_config.service_name, + lrsLoadReportingServer: message.lrs_server ? context.server : undefined, + outlierDetectionUpdate: convertOutlierDetectionUpdate(message.outlier_detection), + lbPolicyConfig: [lbPolicyConfig] + } + } else if (message.type === 'LOGICAL_DNS') { + if (!message.load_assignment) { + return null; + } + if (message.load_assignment.endpoints.length !== 1) { + return null; + } + if (message.load_assignment.endpoints[0].lb_endpoints.length !== 1) { + return null; + } + const socketAddress = message.load_assignment.endpoints[0].lb_endpoints[0].endpoint?.address?.socket_address; + if (!socketAddress) { + return null; + } + if (socketAddress.address === '') { + return null; + } + if (socketAddress.port_specifier !== 'port_value') { + return null; + } + return { + type: 'LOGICAL_DNS', + name: message.name, + aggregateChildren: [], + maxConcurrentRequests: maxConcurrentRequests, + dnsHostname: `${socketAddress.address}:${socketAddress.port_value}`, + lrsLoadReportingServer: message.lrs_server ? 
context.server : undefined, + outlierDetectionUpdate: convertOutlierDetectionUpdate(message.outlier_detection), + lbPolicyConfig: [lbPolicyConfig] + }; + } + } + return null; + } + + decode(context:XdsDecodeContext, resource: Any__Output): XdsDecodeResult { + if (resource.type_url !== CDS_TYPE_URL) { + throw new Error( + `ADS Error: Invalid resource type ${resource.type_url}, expected ${CDS_TYPE_URL}` + ); + } + const message = decodeSingleResource(CDS_TYPE_URL, resource.value); + trace('Decoded raw resource of type ' + CDS_TYPE_URL + ': ' + JSON.stringify(message, undefined, 2)); + const validatedMessage = this.validateResource(context, message); + if (validatedMessage) { + return { + name: validatedMessage.name, + value: validatedMessage + }; + } else { + return { + name: message.name, + error: 'Cluster message validation failed' + }; + } + } + + allResourcesRequiredInSotW(): boolean { + return true; + } + + static startWatch(client: XdsClient, name: string, watcher: Watcher) { + client.watchResource(ClusterResourceType.get(), name, watcher); + } + + static cancelWatch(client: XdsClient, name: string, watcher: Watcher) { + client.cancelResourceWatch(ClusterResourceType.get(), name, watcher); + } +} diff --git a/packages/grpc-js-xds/src/xds-resource-type/endpoint-resource-type.ts b/packages/grpc-js-xds/src/xds-resource-type/endpoint-resource-type.ts new file mode 100644 index 000000000..093ca52e5 --- /dev/null +++ b/packages/grpc-js-xds/src/xds-resource-type/endpoint-resource-type.ts @@ -0,0 +1,130 @@ +import { experimental, logVerbosity } from "@grpc/grpc-js"; +import { ClusterLoadAssignment__Output } from "../generated/envoy/config/endpoint/v3/ClusterLoadAssignment"; +import { XdsDecodeContext, XdsDecodeResult, XdsResourceType } from "./xds-resource-type"; +import { Locality__Output } from "../generated/envoy/config/core/v3/Locality"; +import { SocketAddress__Output } from "../generated/envoy/config/core/v3/SocketAddress"; +import { isIPv4, isIPv6 } from "net"; +import { Any__Output } from "../generated/google/protobuf/Any"; +import { EDS_TYPE_URL, decodeSingleResource } from "../resources"; +import { Watcher, XdsClient } from "../xds-client"; + +const TRACER_NAME = 'xds_client'; + +const UINT32_MAX = 0xFFFFFFFF; + +function trace(text: string): void { + experimental.trace(logVerbosity.DEBUG, TRACER_NAME, text); +} + +function localitiesEqual(a: Locality__Output, b: Locality__Output) { + return a.region === b.region && a.sub_zone === b.sub_zone && a.zone === b.zone; +} + +function addressesEqual(a: SocketAddress__Output, b: SocketAddress__Output) { + return a.address === b.address && a.port_value === b.port_value; +} + +export class EndpointResourceType extends XdsResourceType { + private static singleton: EndpointResourceType = new EndpointResourceType(); + + private constructor() { + super(); + } + + static get() { + return EndpointResourceType.singleton; + } + + getTypeUrl(): string { + return 'envoy.config.endpoint.v3.ClusterLoadAssignment'; + } + + private validateResource(message: ClusterLoadAssignment__Output): ClusterLoadAssignment__Output | null { + const seenLocalities: {locality: Locality__Output, priority: number}[] = []; + const seenAddresses: SocketAddress__Output[] = []; + const priorityTotalWeights: Map = new Map(); + for (const endpoint of message.endpoints) { + if (!endpoint.locality) { + trace('EDS validation: endpoint locality unset'); + return null; + } + for (const {locality, priority} of seenLocalities) { + if (localitiesEqual(endpoint.locality, locality) && 
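/* Illustrative sketch, not part of the change itself: the CdsUpdate that the cluster
 * validation logic above produces for a plain EDS cluster with the default ROUND_ROBIN
 * policy and no outlier detection configured (field values are made up for illustration). */
const exampleEdsUpdate: CdsUpdate = {
  type: 'EDS',
  name: 'example_cluster',
  aggregateChildren: [],
  edsServiceName: 'example_eds_service',
  // Default LB config produced for lb_policy ROUND_ROBIN:
  lbPolicyConfig: [{ xds_wrr_locality: { child_policy: [{ round_robin: {} }] } }],
  // No-op config produced by convertOutlierDetectionUpdate(null) when outlier
  // detection support is enabled:
  outlierDetectionUpdate: { child_policy: [] },
};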
endpoint.priority === priority) { + trace('EDS validation: endpoint locality duplicated: ' + JSON.stringify(locality) + ', priority=' + priority); + return null; + } + } + seenLocalities.push({locality: endpoint.locality, priority: endpoint.priority}); + for (const lb of endpoint.lb_endpoints) { + const socketAddress = lb.endpoint?.address?.socket_address; + if (!socketAddress) { + trace('EDS validation: endpoint socket_address not set'); + return null; + } + if (socketAddress.port_specifier !== 'port_value') { + trace('EDS validation: socket_address.port_specifier !== "port_value"'); + return null; + } + if (!(isIPv4(socketAddress.address) || isIPv6(socketAddress.address))) { + trace('EDS validation: address not a valid IPv4 or IPv6 address: ' + socketAddress.address); + return null; + } + for (const address of seenAddresses) { + if (addressesEqual(socketAddress, address)) { + trace('EDS validation: duplicate address seen: ' + address); + return null; + } + } + seenAddresses.push(socketAddress); + } + priorityTotalWeights.set(endpoint.priority, (priorityTotalWeights.get(endpoint.priority) ?? 0) + (endpoint.load_balancing_weight?.value ?? 0)); + } + for (const totalWeight of priorityTotalWeights.values()) { + if (totalWeight > UINT32_MAX) { + trace('EDS validation: total weight > UINT32_MAX') + return null; + } + } + for (const priority of priorityTotalWeights.keys()) { + if (priority > 0 && !priorityTotalWeights.has(priority - 1)) { + trace('EDS validation: priorities not contiguous'); + return null; + } + } + return message; + } + + decode(context: XdsDecodeContext, resource: Any__Output): XdsDecodeResult { + if (resource.type_url !== EDS_TYPE_URL) { + throw new Error( + `ADS Error: Invalid resource type ${resource.type_url}, expected ${EDS_TYPE_URL}` + ); + } + const message = decodeSingleResource(EDS_TYPE_URL, resource.value); + trace('Decoded raw resource of type ' + EDS_TYPE_URL + ': ' + JSON.stringify(message, undefined, 2)); + const validatedMessage = this.validateResource(message); + if (validatedMessage) { + return { + name: validatedMessage.cluster_name, + value: validatedMessage + }; + } else { + return { + name: message.cluster_name, + error: 'Endpoint message validation failed' + }; + } + } + + allResourcesRequiredInSotW(): boolean { + return false; + } + + static startWatch(client: XdsClient, name: string, watcher: Watcher) { + client.watchResource(EndpointResourceType.get(), name, watcher); + } + + static cancelWatch(client: XdsClient, name: string, watcher: Watcher) { + client.cancelResourceWatch(EndpointResourceType.get(), name, watcher); + } +} diff --git a/packages/grpc-js-xds/src/xds-resource-type/listener-resource-type.ts b/packages/grpc-js-xds/src/xds-resource-type/listener-resource-type.ts new file mode 100644 index 000000000..cf5d4d591 --- /dev/null +++ b/packages/grpc-js-xds/src/xds-resource-type/listener-resource-type.ts @@ -0,0 +1,135 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +import { logVerbosity, experimental } from "@grpc/grpc-js"; +import { EXPERIMENTAL_FAULT_INJECTION } from "../environment"; +import { Listener__Output } from "../generated/envoy/config/listener/v3/Listener"; +import { Any__Output } from "../generated/google/protobuf/Any"; +import { HTTP_CONNECTION_MANGER_TYPE_URL, LDS_TYPE_URL, decodeSingleResource } from "../resources"; +import { XdsDecodeContext, XdsDecodeResult, XdsResourceType } from "./xds-resource-type"; +import { getTopLevelFilterUrl, validateTopLevelFilter } from "../http-filter"; +import { RouteConfigurationResourceType } from "./route-config-resource-type"; +import { Watcher, XdsClient } from "../xds-client"; + +const TRACER_NAME = 'xds_client'; + +function trace(text: string): void { + experimental.trace(logVerbosity.DEBUG, TRACER_NAME, text); +} + +const ROUTER_FILTER_URL = 'type.googleapis.com/envoy.extensions.filters.http.router.v3.Router'; + +export class ListenerResourceType extends XdsResourceType { + private static singleton: ListenerResourceType = new ListenerResourceType(); + private constructor() { + super(); + } + + static get() { + return ListenerResourceType.singleton; + } + getTypeUrl(): string { + return 'envoy.config.listener.v3.Listener'; + } + + private validateResource(message: Listener__Output): Listener__Output | null { + if ( + !( + message.api_listener?.api_listener && + message.api_listener.api_listener.type_url === HTTP_CONNECTION_MANGER_TYPE_URL + ) + ) { + return null; + } + const httpConnectionManager = decodeSingleResource(HTTP_CONNECTION_MANGER_TYPE_URL, message.api_listener!.api_listener.value); + if (EXPERIMENTAL_FAULT_INJECTION) { + const filterNames = new Set(); + for (const [index, httpFilter] of httpConnectionManager.http_filters.entries()) { + if (filterNames.has(httpFilter.name)) { + trace('LDS response validation failed: duplicate HTTP filter name ' + httpFilter.name); + return null; + } + filterNames.add(httpFilter.name); + if (!validateTopLevelFilter(httpFilter)) { + trace('LDS response validation failed: ' + httpFilter.name + ' filter validation failed'); + return null; + } + /* Validate that the last filter, and only the last filter, is the + * router filter. */ + const filterUrl = getTopLevelFilterUrl(httpFilter.typed_config!) + if (index < httpConnectionManager.http_filters.length - 1) { + if (filterUrl === ROUTER_FILTER_URL) { + trace('LDS response validation failed: router filter is before end of list'); + return null; + } + } else { + if (filterUrl !== ROUTER_FILTER_URL) { + trace('LDS response validation failed: final filter is ' + filterUrl); + return null; + } + } + } + } + switch (httpConnectionManager.route_specifier) { + case 'rds': + if (!httpConnectionManager.rds?.config_source?.ads && !httpConnectionManager.rds?.config_source?.self) { + return null; + } + return message; + case 'route_config': + if (!RouteConfigurationResourceType.get().validateResource(httpConnectionManager.route_config!)) { + return null; + } + return message; + } + return null; + } + + decode(context: XdsDecodeContext, resource: Any__Output): XdsDecodeResult { + if (resource.type_url !== LDS_TYPE_URL) { + throw new Error( + `ADS Error: Invalid resource type ${resource.type_url}, expected ${LDS_TYPE_URL}` + ); + } + const message = decodeSingleResource(LDS_TYPE_URL, resource.value); + trace('Decoded raw resource of type ' + LDS_TYPE_URL + ': ' + JSON.stringify(message, (key, value) => (value && value.type === 'Buffer' && Array.isArray(value.data)) ? 
(value.data as Number[]).map(n => n.toString(16)).join('') : value, 2)); + const validatedMessage = this.validateResource(message); + if (validatedMessage) { + return { + name: validatedMessage.name, + value: validatedMessage + }; + } else { + return { + name: message.name, + error: 'Listener message validation failed' + }; + } + } + + allResourcesRequiredInSotW(): boolean { + return true; + } + + static startWatch(client: XdsClient, name: string, watcher: Watcher) { + client.watchResource(ListenerResourceType.get(), name, watcher); + } + + static cancelWatch(client: XdsClient, name: string, watcher: Watcher) { + client.cancelResourceWatch(ListenerResourceType.get(), name, watcher); + } +} diff --git a/packages/grpc-js-xds/src/xds-resource-type/route-config-resource-type.ts b/packages/grpc-js-xds/src/xds-resource-type/route-config-resource-type.ts new file mode 100644 index 000000000..766a84388 --- /dev/null +++ b/packages/grpc-js-xds/src/xds-resource-type/route-config-resource-type.ts @@ -0,0 +1,204 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import { experimental, logVerbosity } from "@grpc/grpc-js"; +import { EXPERIMENTAL_FAULT_INJECTION, EXPERIMENTAL_RETRY } from "../environment"; +import { RetryPolicy__Output } from "../generated/envoy/config/route/v3/RetryPolicy"; +import { RouteConfiguration__Output } from "../generated/envoy/config/route/v3/RouteConfiguration"; +import { Any__Output } from "../generated/google/protobuf/Any"; +import { Duration__Output } from "../generated/google/protobuf/Duration"; +import { validateOverrideFilter } from "../http-filter"; +import { RDS_TYPE_URL, decodeSingleResource } from "../resources"; +import { Watcher, XdsClient } from "../xds-client"; +import { XdsDecodeContext, XdsDecodeResult, XdsResourceType } from "./xds-resource-type"; +const TRACER_NAME = 'xds_client'; + +function trace(text: string): void { + experimental.trace(logVerbosity.DEBUG, TRACER_NAME, text); +} + +const SUPPORTED_PATH_SPECIFIERS = ['prefix', 'path', 'safe_regex']; +const SUPPPORTED_HEADER_MATCH_SPECIFIERS = [ + 'exact_match', + 'safe_regex_match', + 'range_match', + 'present_match', + 'prefix_match', + 'suffix_match', + 'string_match']; +const SUPPORTED_CLUSTER_SPECIFIERS = ['cluster', 'weighted_clusters', 'cluster_header']; + +const UINT32_MAX = 0xFFFFFFFF; + +function durationToMs(duration: Duration__Output | null): number | null { + if (duration === null) { + return null; + } + return (Number.parseInt(duration.seconds) * 1000 + duration.nanos / 1_000_000) | 0; +} + +export class RouteConfigurationResourceType extends XdsResourceType { + private static singleton: RouteConfigurationResourceType = new RouteConfigurationResourceType(); + + private constructor() { + super(); + } + + static get() { + return RouteConfigurationResourceType.singleton; + } + + getTypeUrl(): string { + return 'envoy.config.route.v3.RouteConfiguration'; + } + + private validateRetryPolicy(policy: RetryPolicy__Output | null): boolean { + if 
(policy === null) { + return true; + } + const numRetries = policy.num_retries?.value ?? 1 + if (numRetries < 1) { + return false; + } + if (policy.retry_back_off) { + if (!policy.retry_back_off.base_interval) { + return false; + } + const baseInterval = durationToMs(policy.retry_back_off.base_interval)!; + const maxInterval = durationToMs(policy.retry_back_off.max_interval) ?? (10 * baseInterval); + if (!(maxInterval >= baseInterval) && (baseInterval > 0)) { + return false; + } + } + return true; + } + + public validateResource(message: RouteConfiguration__Output): RouteConfiguration__Output | null { + // https://github.com/grpc/proposal/blob/master/A28-xds-traffic-splitting-and-routing.md#response-validation + for (const virtualHost of message.virtual_hosts) { + for (const domainPattern of virtualHost.domains) { + const starIndex = domainPattern.indexOf('*'); + const lastStarIndex = domainPattern.lastIndexOf('*'); + // A domain pattern can have at most one wildcard * + if (starIndex !== lastStarIndex) { + return null; + } + // A wildcard * can either be absent or at the beginning or end of the pattern + if (!(starIndex === -1 || starIndex === 0 || starIndex === domainPattern.length - 1)) { + return null; + } + } + if (EXPERIMENTAL_FAULT_INJECTION) { + for (const filterConfig of Object.values(virtualHost.typed_per_filter_config ?? {})) { + if (!validateOverrideFilter(filterConfig)) { + return null; + } + } + } + if (EXPERIMENTAL_RETRY) { + if (!this.validateRetryPolicy(virtualHost.retry_policy)) { + return null; + } + } + for (const route of virtualHost.routes) { + const match = route.match; + if (!match) { + return null; + } + if (SUPPORTED_PATH_SPECIFIERS.indexOf(match.path_specifier) < 0) { + return null; + } + for (const headers of match.headers) { + if (SUPPPORTED_HEADER_MATCH_SPECIFIERS.indexOf(headers.header_match_specifier) < 0) { + return null; + } + } + if (route.action !== 'route') { + return null; + } + if ((route.route === undefined) || (route.route === null) || SUPPORTED_CLUSTER_SPECIFIERS.indexOf(route.route.cluster_specifier) < 0) { + return null; + } + if (EXPERIMENTAL_FAULT_INJECTION) { + for (const [name, filterConfig] of Object.entries(route.typed_per_filter_config ?? {})) { + if (!validateOverrideFilter(filterConfig)) { + return null; + } + } + } + if (EXPERIMENTAL_RETRY) { + if (!this.validateRetryPolicy(route.route.retry_policy)) { + return null; + } + } + if (route.route!.cluster_specifier === 'weighted_clusters') { + let weightSum = 0; + for (const clusterWeight of route.route.weighted_clusters!.clusters) { + weightSum += clusterWeight.weight?.value ?? 0; + } + if (weightSum === 0 || weightSum > UINT32_MAX) { + return null; + } + if (EXPERIMENTAL_FAULT_INJECTION) { + for (const weightedCluster of route.route!.weighted_clusters!.clusters) { + for (const filterConfig of Object.values(weightedCluster.typed_per_filter_config ?? 
{})) { + if (!validateOverrideFilter(filterConfig)) { + return null; + } + } + } + } + } + } + } + return message; + } + + decode(context: XdsDecodeContext, resource: Any__Output): XdsDecodeResult { + if (resource.type_url !== RDS_TYPE_URL) { + throw new Error( + `ADS Error: Invalid resource type ${resource.type_url}, expected ${RDS_TYPE_URL}` + ); + } + const message = decodeSingleResource(RDS_TYPE_URL, resource.value); + trace('Decoded raw resource of type ' + RDS_TYPE_URL + ': ' + JSON.stringify(message, undefined, 2)); + const validatedMessage = this.validateResource(message); + if (validatedMessage) { + return { + name: validatedMessage.name, + value: validatedMessage + }; + } else { + return { + name: message.name, + error: 'Route configuration message validation failed' + }; + } + } + + allResourcesRequiredInSotW(): boolean { + return false; + } + + static startWatch(client: XdsClient, name: string, watcher: Watcher) { + client.watchResource(RouteConfigurationResourceType.get(), name, watcher); + } + + static cancelWatch(client: XdsClient, name: string, watcher: Watcher) { + client.cancelResourceWatch(RouteConfigurationResourceType.get(), name, watcher); + } +} diff --git a/packages/grpc-js-xds/src/xds-resource-type/xds-resource-type.ts b/packages/grpc-js-xds/src/xds-resource-type/xds-resource-type.ts new file mode 100644 index 000000000..8c2dc5e4a --- /dev/null +++ b/packages/grpc-js-xds/src/xds-resource-type/xds-resource-type.ts @@ -0,0 +1,91 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import { Any__Output } from "../generated/google/protobuf/Any"; +import { XdsServerConfig } from "../xds-bootstrap"; + +export interface XdsDecodeContext { + server: XdsServerConfig; +} + +export interface XdsDecodeResult { + name: string; + /** + * Mutually exclusive with error. + */ + value?: object; + /** + * Mutually exclusive with value. 
+ */ + error?: string; +} + +type ValueType = string | number | bigint | boolean | undefined | null | symbol | {[key: string]: ValueType} | ValueType[]; + +function deepEqual(value1: ValueType, value2: ValueType): boolean { + if (value1 === value2) { + return true; + } + // Extra null check to narrow type result of typeof value === 'object' + if (value1 === null || value2 === null) { + // They are not equal per previous check + return false; + } + if (Array.isArray(value1) && Array.isArray(value2)) { + if (value1.length !== value2.length) { + return false; + } + for (const [index, entry] of value1.entries()) { + if (!deepEqual(entry, value2[index])) { + return false; + } + } + return true; + } else if (Array.isArray(value1) || Array.isArray(value2)) { + return false; + } else if (typeof value1 === 'object' && typeof value2 === 'object') { + for (const [key, entry] of Object.entries(value1)) { + if (!deepEqual(entry, value2[key])) { + return false; + } + } + return true; + } + return false; +} + +export abstract class XdsResourceType { + /** + * The type URL as used in xdstp: names + */ + abstract getTypeUrl(): string; + + /** + * The type URL as used in the `DiscoveryResponse.type_url` field and the `Any.type_url` field + */ + getFullTypeUrl(): string { + return `type.googleapis.com/${this.getTypeUrl()}`; + } + + abstract decode(context: XdsDecodeContext, resource: Any__Output): XdsDecodeResult; + + abstract allResourcesRequiredInSotW(): boolean; + + resourcesEqual(value1: object | null, value2: object | null): boolean { + return deepEqual(value1 as ValueType, value2 as ValueType); + } +} diff --git a/packages/grpc-js-xds/src/xds-stream-state/cds-state.ts b/packages/grpc-js-xds/src/xds-stream-state/cds-state.ts deleted file mode 100644 index 343089958..000000000 --- a/packages/grpc-js-xds/src/xds-stream-state/cds-state.ts +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -import { experimental, logVerbosity, StatusObject } from "@grpc/grpc-js"; -import { Cluster__Output } from "../generated/envoy/api/v2/Cluster"; -import { EdsState } from "./eds-state"; -import { Watcher, XdsStreamState } from "./xds-stream-state"; - -const TRACER_NAME = 'xds_client'; - -function trace(text: string): void { - experimental.trace(logVerbosity.DEBUG, TRACER_NAME, text); -} - -export class CdsState implements XdsStreamState { - versionInfo = ''; - nonce = ''; - - private watchers: Map[]> = new Map< - string, - Watcher[] - >(); - - private latestResponses: Cluster__Output[] = []; - - constructor( - private edsState: EdsState, - private updateResourceNames: () => void - ) {} - - /** - * Add the watcher to the watcher list. Returns true if the list of resource - * names has changed, and false otherwise. 
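/* Illustrative sketch, not part of the change itself: behavior of the XdsResourceType
 * base-class helpers for the concrete types in this change. resourcesEqual is the
 * structural comparison implemented by deepEqual above, presumably used by the ADS
 * stream handling (not shown in this hunk) to skip updates whose content is unchanged. */
function exampleTypeHelpers() {
  const cdsType = ClusterResourceType.get();
  console.log(cdsType.getTypeUrl());     // envoy.config.cluster.v3.Cluster
  console.log(cdsType.getFullTypeUrl()); // type.googleapis.com/envoy.config.cluster.v3.Cluster
  // Structurally equal objects compare equal even though they are distinct instances:
  console.log(cdsType.resourcesEqual({ name: 'a', children: [] }, { name: 'a', children: [] })); // true
}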
- * @param clusterName - * @param watcher - */ - addWatcher(clusterName: string, watcher: Watcher): void { - trace('Adding CDS watcher for clusterName ' + clusterName); - let watchersEntry = this.watchers.get(clusterName); - let addedServiceName = false; - if (watchersEntry === undefined) { - addedServiceName = true; - watchersEntry = []; - this.watchers.set(clusterName, watchersEntry); - } - watchersEntry.push(watcher); - - /* If we have already received an update for the requested edsServiceName, - * immediately pass that update along to the watcher */ - for (const message of this.latestResponses) { - if (message.name === clusterName) { - /* These updates normally occur asynchronously, so we ensure that - * the same happens here */ - process.nextTick(() => { - trace('Reporting existing CDS update for new watcher for clusterName ' + clusterName); - watcher.onValidUpdate(message); - }); - } - } - if (addedServiceName) { - this.updateResourceNames(); - } - } - - removeWatcher(clusterName: string, watcher: Watcher): void { - trace('Removing CDS watcher for clusterName ' + clusterName); - const watchersEntry = this.watchers.get(clusterName); - let removedServiceName = false; - if (watchersEntry !== undefined) { - const entryIndex = watchersEntry.indexOf(watcher); - if (entryIndex >= 0) { - watchersEntry.splice(entryIndex, 1); - } - if (watchersEntry.length === 0) { - removedServiceName = true; - this.watchers.delete(clusterName); - } - } - if (removedServiceName) { - this.updateResourceNames(); - } - } - - getResourceNames(): string[] { - return Array.from(this.watchers.keys()); - } - - private validateResponse(message: Cluster__Output): boolean { - if (message.type !== 'EDS') { - return false; - } - if (!message.eds_cluster_config?.eds_config?.ads) { - return false; - } - if (message.lb_policy !== 'ROUND_ROBIN') { - return false; - } - if (message.lrs_server) { - if (!message.lrs_server.self) { - return false; - } - } - return true; - } - - /** - * Given a list of clusterNames (which may actually be the cluster name), - * for each watcher watching a name not on the list, call that watcher's - * onResourceDoesNotExist method. - * @param allClusterNames - */ - private handleMissingNames(allClusterNames: Set) { - for (const [clusterName, watcherList] of this.watchers.entries()) { - if (!allClusterNames.has(clusterName)) { - trace('Reporting CDS resource does not exist for clusterName ' + clusterName); - for (const watcher of watcherList) { - watcher.onResourceDoesNotExist(); - } - } - } - } - - handleResponses(responses: Cluster__Output[]): string | null { - for (const message of responses) { - if (!this.validateResponse(message)) { - trace('CDS validation failed for message ' + JSON.stringify(message)); - return 'CDS Error: Cluster validation failed'; - } - } - this.latestResponses = responses; - const allEdsServiceNames: Set = new Set(); - const allClusterNames: Set = new Set(); - for (const message of responses) { - allClusterNames.add(message.name); - const edsServiceName = message.eds_cluster_config?.service_name ?? ''; - allEdsServiceNames.add( - edsServiceName === '' ? message.name : edsServiceName - ); - const watchers = this.watchers.get(message.name) ?? 
[]; - for (const watcher of watchers) { - watcher.onValidUpdate(message); - } - } - trace('Received CDS updates for cluster names ' + Array.from(allClusterNames)); - this.handleMissingNames(allClusterNames); - this.edsState.handleMissingNames(allEdsServiceNames); - return null; - } - - reportStreamError(status: StatusObject): void { - for (const watcherList of this.watchers.values()) { - for (const watcher of watcherList) { - watcher.onTransientError(status); - } - } - } -} \ No newline at end of file diff --git a/packages/grpc-js-xds/src/xds-stream-state/eds-state.ts b/packages/grpc-js-xds/src/xds-stream-state/eds-state.ts deleted file mode 100644 index c9beef292..000000000 --- a/packages/grpc-js-xds/src/xds-stream-state/eds-state.ts +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -import { experimental, logVerbosity, StatusObject } from "@grpc/grpc-js"; -import { isIPv4, isIPv6 } from "net"; -import { ClusterLoadAssignment__Output } from "../generated/envoy/api/v2/ClusterLoadAssignment"; -import { Watcher, XdsStreamState } from "./xds-stream-state"; - -const TRACER_NAME = 'xds_client'; - -function trace(text: string): void { - experimental.trace(logVerbosity.DEBUG, TRACER_NAME, text); -} - -export class EdsState implements XdsStreamState { - public versionInfo = ''; - public nonce = ''; - - private watchers: Map< - string, - Watcher[] - > = new Map[]>(); - - private latestResponses: ClusterLoadAssignment__Output[] = []; - - constructor(private updateResourceNames: () => void) {} - - /** - * Add the watcher to the watcher list. Returns true if the list of resource - * names has changed, and false otherwise. 
- * @param edsServiceName - * @param watcher - */ - addWatcher( - edsServiceName: string, - watcher: Watcher - ): void { - let watchersEntry = this.watchers.get(edsServiceName); - let addedServiceName = false; - if (watchersEntry === undefined) { - addedServiceName = true; - watchersEntry = []; - this.watchers.set(edsServiceName, watchersEntry); - } - trace('Adding EDS watcher (' + watchersEntry.length + ' ->' + (watchersEntry.length + 1) + ') for edsServiceName ' + edsServiceName); - watchersEntry.push(watcher); - - /* If we have already received an update for the requested edsServiceName, - * immediately pass that update along to the watcher */ - for (const message of this.latestResponses) { - if (message.cluster_name === edsServiceName) { - /* These updates normally occur asynchronously, so we ensure that - * the same happens here */ - process.nextTick(() => { - trace('Reporting existing EDS update for new watcher for edsServiceName ' + edsServiceName); - watcher.onValidUpdate(message); - }); - } - } - if (addedServiceName) { - this.updateResourceNames(); - } - } - - removeWatcher( - edsServiceName: string, - watcher: Watcher - ): void { - trace('Removing EDS watcher for edsServiceName ' + edsServiceName); - const watchersEntry = this.watchers.get(edsServiceName); - let removedServiceName = false; - if (watchersEntry !== undefined) { - const entryIndex = watchersEntry.indexOf(watcher); - if (entryIndex >= 0) { - trace('Removed EDS watcher (' + watchersEntry.length + ' -> ' + (watchersEntry.length - 1) + ') for edsServiceName ' + edsServiceName); - watchersEntry.splice(entryIndex, 1); - } - if (watchersEntry.length === 0) { - removedServiceName = true; - this.watchers.delete(edsServiceName); - } - } - if (removedServiceName) { - this.updateResourceNames(); - } - } - - getResourceNames(): string[] { - return Array.from(this.watchers.keys()); - } - - /** - * Validate the ClusterLoadAssignment object by these rules: - * https://github.com/grpc/proposal/blob/master/A27-xds-global-load-balancing.md#clusterloadassignment-proto - * @param message - */ - private validateResponse(message: ClusterLoadAssignment__Output) { - for (const endpoint of message.endpoints) { - for (const lb of endpoint.lb_endpoints) { - const socketAddress = lb.endpoint?.address?.socket_address; - if (!socketAddress) { - return false; - } - if (socketAddress.port_specifier !== 'port_value') { - return false; - } - if (!(isIPv4(socketAddress.address) || isIPv6(socketAddress.address))) { - return false; - } - } - } - return true; - } - - /** - * Given a list of edsServiceNames (which may actually be the cluster name), - * for each watcher watching a name not on the list, call that watcher's - * onResourceDoesNotExist method. 
- * @param allClusterNames - */ - handleMissingNames(allEdsServiceNames: Set) { - for (const [edsServiceName, watcherList] of this.watchers.entries()) { - if (!allEdsServiceNames.has(edsServiceName)) { - trace('Reporting EDS resource does not exist for edsServiceName ' + edsServiceName); - for (const watcher of watcherList) { - watcher.onResourceDoesNotExist(); - } - } - } - } - - handleResponses(responses: ClusterLoadAssignment__Output[]) { - for (const message of responses) { - if (!this.validateResponse(message)) { - trace('EDS validation failed for message ' + JSON.stringify(message)); - return 'EDS Error: ClusterLoadAssignment validation failed'; - } - } - this.latestResponses = responses; - const allClusterNames: Set = new Set(); - for (const message of responses) { - allClusterNames.add(message.cluster_name); - const watchers = this.watchers.get(message.cluster_name) ?? []; - for (const watcher of watchers) { - watcher.onValidUpdate(message); - } - } - trace('Received EDS updates for cluster names ' + Array.from(allClusterNames)); - this.handleMissingNames(allClusterNames); - return null; - } - - reportStreamError(status: StatusObject): void { - for (const watcherList of this.watchers.values()) { - for (const watcher of watcherList) { - watcher.onTransientError(status); - } - } - } -} \ No newline at end of file diff --git a/packages/grpc-js-xds/src/xds-stream-state/lds-state.ts b/packages/grpc-js-xds/src/xds-stream-state/lds-state.ts deleted file mode 100644 index 554712727..000000000 --- a/packages/grpc-js-xds/src/xds-stream-state/lds-state.ts +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -import * as protoLoader from '@grpc/proto-loader'; -import { experimental, logVerbosity, StatusObject } from "@grpc/grpc-js"; -import { Listener__Output } from "../generated/envoy/api/v2/Listener"; -import { RdsState } from "./rds-state"; -import { Watcher, XdsStreamState } from "./xds-stream-state"; -import { HttpConnectionManager__Output } from '../generated/envoy/config/filter/network/http_connection_manager/v2/HttpConnectionManager'; - -const TRACER_NAME = 'xds_client'; - -function trace(text: string): void { - experimental.trace(logVerbosity.DEBUG, TRACER_NAME, text); -} - -const HTTP_CONNECTION_MANGER_TYPE_URL = - 'type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager'; - -export class LdsState implements XdsStreamState { - versionInfo = ''; - nonce = ''; - - private watchers: Map[]> = new Map[]>(); - private latestResponses: Listener__Output[] = []; - - constructor(private rdsState: RdsState, private updateResourceNames: () => void) {} - - addWatcher(targetName: string, watcher: Watcher) { - trace('Adding RDS watcher for targetName ' + targetName); - let watchersEntry = this.watchers.get(targetName); - let addedServiceName = false; - if (watchersEntry === undefined) { - addedServiceName = true; - watchersEntry = []; - this.watchers.set(targetName, watchersEntry); - } - watchersEntry.push(watcher); - - /* If we have already received an update for the requested edsServiceName, - * immediately pass that update along to the watcher */ - for (const message of this.latestResponses) { - if (message.name === targetName) { - /* These updates normally occur asynchronously, so we ensure that - * the same happens here */ - process.nextTick(() => { - trace('Reporting existing RDS update for new watcher for targetName ' + targetName); - watcher.onValidUpdate(message); - }); - } - } - if (addedServiceName) { - this.updateResourceNames(); - } - } - - removeWatcher(targetName: string, watcher: Watcher): void { - trace('Removing RDS watcher for targetName ' + targetName); - const watchersEntry = this.watchers.get(targetName); - let removedServiceName = false; - if (watchersEntry !== undefined) { - const entryIndex = watchersEntry.indexOf(watcher); - if (entryIndex >= 0) { - watchersEntry.splice(entryIndex, 1); - } - if (watchersEntry.length === 0) { - removedServiceName = true; - this.watchers.delete(targetName); - } - } - if (removedServiceName) { - this.updateResourceNames(); - } - } - - getResourceNames(): string[] { - return Array.from(this.watchers.keys()); - } - - private validateResponse(message: Listener__Output): boolean { - if ( - !( - message.api_listener?.api_listener && - protoLoader.isAnyExtension(message.api_listener.api_listener) && - message.api_listener?.api_listener['@type'] === - HTTP_CONNECTION_MANGER_TYPE_URL - ) - ) { - return false; - } - const httpConnectionManager = message.api_listener - ?.api_listener as protoLoader.AnyExtension & - HttpConnectionManager__Output; - switch (httpConnectionManager.route_specifier) { - case 'rds': - return !!httpConnectionManager.rds?.config_source?.ads; - case 'route_config': - return this.rdsState.validateResponse(httpConnectionManager.route_config!); - } - return false; - } - - private handleMissingNames(allTargetNames: Set) { - for (const [targetName, watcherList] of this.watchers.entries()) { - if (!allTargetNames.has(targetName)) { - for (const watcher of watcherList) { - watcher.onResourceDoesNotExist(); - } - } - } - } - - handleResponses(responses: Listener__Output[]): string | 
null { - for (const message of responses) { - if (!this.validateResponse(message)) { - trace('LDS validation failed for message ' + JSON.stringify(message)); - return 'LDS Error: Route validation failed'; - } - } - this.latestResponses = responses; - const allTargetNames = new Set(); - for (const message of responses) { - allTargetNames.add(message.name); - const watchers = this.watchers.get(message.name) ?? []; - for (const watcher of watchers) { - watcher.onValidUpdate(message); - } - } - trace('Received RDS response with route config names ' + Array.from(allTargetNames)); - this.handleMissingNames(allTargetNames); - return null; - } - - reportStreamError(status: StatusObject): void { - for (const watcherList of this.watchers.values()) { - for (const watcher of watcherList) { - watcher.onTransientError(status); - } - } - } -} \ No newline at end of file diff --git a/packages/grpc-js-xds/src/xds-stream-state/rds-state.ts b/packages/grpc-js-xds/src/xds-stream-state/rds-state.ts deleted file mode 100644 index 8f795e0f5..000000000 --- a/packages/grpc-js-xds/src/xds-stream-state/rds-state.ts +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -import { experimental, logVerbosity, StatusObject } from "@grpc/grpc-js"; -import { RouteConfiguration__Output } from "../generated/envoy/api/v2/RouteConfiguration"; -import { CdsLoadBalancingConfig } from "../load-balancer-cds"; -import { Watcher, XdsStreamState } from "./xds-stream-state"; -import ServiceConfig = experimental.ServiceConfig; - -const TRACER_NAME = 'xds_client'; - -function trace(text: string): void { - experimental.trace(logVerbosity.DEBUG, TRACER_NAME, text); -} - -const SUPPORTED_PATH_SPECIFIERS = ['prefix', 'path', 'safe_regex']; -const SUPPPORTED_HEADER_MATCH_SPECIFIERS = [ - 'exact_match', - 'safe_regex_match', - 'range_match', - 'present_match', - 'prefix_match', - 'suffix_match']; -const SUPPORTED_CLUSTER_SPECIFIERS = ['cluster', 'weighted_clusters', 'cluster_header']; - -export class RdsState implements XdsStreamState { - versionInfo = ''; - nonce = ''; - - private watchers: Map[]> = new Map[]>(); - private latestResponses: RouteConfiguration__Output[] = []; - - constructor(private updateResourceNames: () => void) {} - - addWatcher(routeConfigName: string, watcher: Watcher) { - trace('Adding RDS watcher for routeConfigName ' + routeConfigName); - let watchersEntry = this.watchers.get(routeConfigName); - let addedServiceName = false; - if (watchersEntry === undefined) { - addedServiceName = true; - watchersEntry = []; - this.watchers.set(routeConfigName, watchersEntry); - } - watchersEntry.push(watcher); - - /* If we have already received an update for the requested edsServiceName, - * immediately pass that update along to the watcher */ - for (const message of this.latestResponses) { - if (message.name === routeConfigName) { - /* These updates normally occur asynchronously, so we ensure that - * the same happens here */ - process.nextTick(() => 
{ - trace('Reporting existing RDS update for new watcher for routeConfigName ' + routeConfigName); - watcher.onValidUpdate(message); - }); - } - } - if (addedServiceName) { - this.updateResourceNames(); - } - } - - removeWatcher(routeConfigName: string, watcher: Watcher): void { - trace('Removing RDS watcher for routeConfigName ' + routeConfigName); - const watchersEntry = this.watchers.get(routeConfigName); - let removedServiceName = false; - if (watchersEntry !== undefined) { - const entryIndex = watchersEntry.indexOf(watcher); - if (entryIndex >= 0) { - watchersEntry.splice(entryIndex, 1); - } - if (watchersEntry.length === 0) { - removedServiceName = true; - this.watchers.delete(routeConfigName); - } - } - if (removedServiceName) { - this.updateResourceNames(); - } - } - - getResourceNames(): string[] { - return Array.from(this.watchers.keys()); - } - - validateResponse(message: RouteConfiguration__Output): boolean { - // https://github.com/grpc/proposal/blob/master/A28-xds-traffic-splitting-and-routing.md#response-validation - for (const virtualHost of message.virtual_hosts) { - for (const domainPattern of virtualHost.domains) { - const starIndex = domainPattern.indexOf('*'); - const lastStarIndex = domainPattern.lastIndexOf('*'); - // A domain pattern can have at most one wildcard * - if (starIndex !== lastStarIndex) { - return false; - } - // A wildcard * can either be absent or at the beginning or end of the pattern - if (!(starIndex === -1 || starIndex === 0 || starIndex === domainPattern.length - 1)) { - return false; - } - } - for (const route of virtualHost.routes) { - const match = route.match; - if (!match) { - return false; - } - if (SUPPORTED_PATH_SPECIFIERS.indexOf(match.path_specifier) < 0) { - return false; - } - for (const headers of match.headers) { - if (SUPPPORTED_HEADER_MATCH_SPECIFIERS.indexOf(headers.header_match_specifier) < 0) { - return false; - } - } - if (route.action !== 'route') { - return false; - } - if ((route.route === undefined) || SUPPORTED_CLUSTER_SPECIFIERS.indexOf(route.route.cluster_specifier) < 0) { - return false; - } - if (route.route!.cluster_specifier === 'weighted_clusters') { - let weightSum = 0; - for (const clusterWeight of route.route.weighted_clusters!.clusters) { - weightSum += clusterWeight.weight?.value ?? 0; - } - if (weightSum !== route.route.weighted_clusters!.total_weight?.value ?? 100) { - return false; - } - } - } - } - return true; - } - - private handleMissingNames(allRouteConfigNames: Set) { - for (const [routeConfigName, watcherList] of this.watchers.entries()) { - if (!allRouteConfigNames.has(routeConfigName)) { - for (const watcher of watcherList) { - watcher.onResourceDoesNotExist(); - } - } - } - } - - handleResponses(responses: RouteConfiguration__Output[]): string | null { - for (const message of responses) { - if (!this.validateResponse(message)) { - trace('RDS validation failed for message ' + JSON.stringify(message)); - return 'RDS Error: Route validation failed'; - } - } - this.latestResponses = responses; - const allRouteConfigNames = new Set(); - for (const message of responses) { - allRouteConfigNames.add(message.name); - const watchers = this.watchers.get(message.name) ?? 
[]; - for (const watcher of watchers) { - watcher.onValidUpdate(message); - } - } - trace('Received RDS response with route config names ' + Array.from(allRouteConfigNames)); - this.handleMissingNames(allRouteConfigNames); - return null; - } - - reportStreamError(status: StatusObject): void { - for (const watcherList of this.watchers.values()) { - for (const watcher of watcherList) { - watcher.onTransientError(status); - } - } - } -} \ No newline at end of file diff --git a/packages/grpc-js-xds/src/xxhash.ts b/packages/grpc-js-xds/src/xxhash.ts new file mode 100644 index 000000000..63f68af2b --- /dev/null +++ b/packages/grpc-js-xds/src/xxhash.ts @@ -0,0 +1,31 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* The simpler `import xxhash from 'xxhash-wasm';` doesn't compile correctly + * to CommonJS require calls for some reason, so we use this import to get + * the type, and then an explicit require call to get the actual value. */ +import xxhashImport from 'xxhash-wasm'; +const xxhash: typeof xxhashImport = require('xxhash-wasm'); + +export let xxhashApi: Awaited<ReturnType<typeof xxhash>> | null = null; + +export async function loadXxhashApi() { + if (!xxhashApi) { + xxhashApi = await xxhash(); + } + return xxhashApi; +} diff --git a/packages/grpc-js-xds/test/backend.ts b/packages/grpc-js-xds/test/backend.ts new file mode 100644 index 000000000..59c23ad7d --- /dev/null +++ b/packages/grpc-js-xds/test/backend.ts @@ -0,0 +1,139 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
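The new xxhash.ts helper above loads the WASM-backed hash API once and caches it, so callers await loadXxhashApi() before hashing. A minimal usage sketch; the import path (relative to a file in the test directory) and the exact h64 signature and return type are assumptions that depend on the installed xxhash-wasm version:

import { loadXxhashApi } from '../src/xxhash';

// Lazily initialize the WASM module on first use; later calls reuse the cached API.
async function hashKey(key: string): Promise<bigint> {
  const api = await loadXxhashApi();
  // h64 hashes a string to a 64-bit value; recent xxhash-wasm versions return a bigint.
  return api.h64(key);
}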
+ * + */ + +import { loadPackageDefinition, sendUnaryData, Server, ServerCredentials, ServerOptions, ServerUnaryCall, UntypedServiceImplementation } from "@grpc/grpc-js"; +import { loadSync } from "@grpc/proto-loader"; +import { ProtoGrpcType } from "./generated/echo"; +import { EchoRequest__Output } from "./generated/grpc/testing/EchoRequest"; +import { EchoResponse } from "./generated/grpc/testing/EchoResponse"; + +const loadedProtos = loadPackageDefinition(loadSync( + [ + 'grpc/testing/echo.proto' + ], + { + keepCase: true, + longs: String, + enums: String, + defaults: true, + oneofs: true, + json: true, + includeDirs: [ + // Paths are relative to build/test + __dirname + '/../../proto/' + ], + })) as unknown as ProtoGrpcType; + +export class Backend { + private server: Server | null = null; + private receivedCallCount = 0; + private callListeners: (() => void)[] = []; + private port: number | null = null; + constructor(private serverOptions?: ServerOptions) { + } + Echo(call: ServerUnaryCall<EchoRequest__Output, EchoResponse>, callback: sendUnaryData<EchoResponse>) { + // call.request.params is currently ignored + this.addCall(); + for (const behaviorEntry of call.metadata.get('rpc-behavior')) { + if (typeof behaviorEntry !== 'string') { + continue; + } + for (const behavior of behaviorEntry.split(',')) { + if (behavior.startsWith('error-code-')) { + const errorCode = Number(behavior.substring('error-code-'.length)); + callback({code: errorCode, details: 'rpc-behavior error code'}); + return; + } + } + } + callback(null, {message: call.request.message}); + } + + addCall() { + this.receivedCallCount++; + this.callListeners.forEach(listener => listener()); + } + + onCall(listener: () => void) { + this.callListeners.push(listener); + } + + start(callback: (error: Error | null, port: number) => void) { + if (this.server) { + throw new Error("Backend already running"); + } + this.server = new Server(this.serverOptions); + this.server.addService(loadedProtos.grpc.testing.EchoTestService.service, this as unknown as UntypedServiceImplementation); + const boundPort = this.port ?? 0; + this.server.bindAsync(`localhost:${boundPort}`, ServerCredentials.createInsecure(), (error, port) => { + if (!error) { + this.port = port; + } + callback(error, port); + }) + } + + startAsync(): Promise<number> { + return new Promise((resolve, reject) => { + this.start((error, port) => { + if (error) { + reject(error); + } else { + resolve(port); + } + }); + }); + } + + getPort(): number { + if (this.port === null) { + throw new Error('Port not set. Backend not yet started.'); + } + return this.port; + } + + getCallCount() { + return this.receivedCallCount; + } + + resetCallCount() { + this.receivedCallCount = 0; + } + + shutdown(callback: (error?: Error) => void) { + if (this.server) { + this.server.tryShutdown(error => { + this.server = null; + callback(error); + }); + } else { + process.nextTick(callback); + } + } + + shutdownAsync(): Promise<void> { + return new Promise((resolve, reject) => { + this.shutdown(error => { + if (error) { + reject(error); + } else { + resolve(); + } + }); + }); + } +} diff --git a/packages/grpc-js-xds/test/client.ts b/packages/grpc-js-xds/test/client.ts new file mode 100644 index 000000000..0779702bb --- /dev/null +++ b/packages/grpc-js-xds/test/client.ts @@ -0,0 +1,105 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
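The Echo handler in the new backend.ts above reads the test-only 'rpc-behavior' request metadata and fails the call with the requested status code whenever an entry of the form 'error-code-N' is present. A rough client-side sketch of driving that path; the sendFailingCall helper is illustrative only, and any client built from the same echo.proto definition would do:

import { Metadata, status } from '@grpc/grpc-js';
import { EchoTestServiceClient } from './generated/grpc/testing/EchoTestService';

// Ask the test backend to fail this one call with UNAVAILABLE (code 14).
function sendFailingCall(client: EchoTestServiceClient) {
  const metadata = new Metadata();
  metadata.add('rpc-behavior', `error-code-${status.UNAVAILABLE}`);
  client.echo({message: 'test'}, metadata, (error, response) => {
    // The handler short-circuits, so error?.code should equal status.UNAVAILABLE.
  });
}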
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import { ChannelOptions, credentials, loadPackageDefinition, ServiceError } from "@grpc/grpc-js"; +import { loadSync } from "@grpc/proto-loader"; +import { ProtoGrpcType } from "./generated/echo"; +import { EchoTestServiceClient } from "./generated/grpc/testing/EchoTestService"; +import { XdsServer } from "./xds-server"; + +const loadedProtos = loadPackageDefinition(loadSync( + [ + 'grpc/testing/echo.proto' + ], + { + keepCase: true, + longs: String, + enums: String, + defaults: true, + oneofs: true, + json: true, + includeDirs: [ + // Paths are relative to build/test + __dirname + '/../../proto/' + ], + })) as unknown as ProtoGrpcType; + +const BOOTSTRAP_CONFIG_KEY = 'grpc.TEST_ONLY_DO_NOT_USE_IN_PROD.xds_bootstrap_config'; + +export class XdsTestClient { + private client: EchoTestServiceClient; + private callInterval: NodeJS.Timer; + + constructor(target: string, bootstrapInfo: string, options?: ChannelOptions) { + this.client = new loadedProtos.grpc.testing.EchoTestService(target, credentials.createInsecure(), {...options, [BOOTSTRAP_CONFIG_KEY]: bootstrapInfo}); + this.callInterval = setInterval(() => {}, 0); + clearInterval(this.callInterval); + } + + static createFromServer(targetName: string, xdsServer: XdsServer, options?: ChannelOptions) { + return new XdsTestClient(`xds:///${targetName}`, xdsServer.getBootstrapInfoString(), options); + } + + startCalls(interval: number) { + clearInterval(this.callInterval); + this.callInterval = setInterval(() => { + this.client.echo({message: 'test'}, (error, value) => { + if (error) { + throw error; + } + }); + }, interval); + } + + stopCalls() { + clearInterval(this.callInterval); + } + + close() { + this.stopCalls(); + this.client.close(); + } + + sendOneCall(callback: (error: ServiceError | null) => void) { + const deadline = new Date(); + deadline.setMilliseconds(deadline.getMilliseconds() + 500); + this.client.echo({message: 'test'}, {deadline}, (error, value) => { + callback(error); + }); + } + + sendNCalls(count: number, callback: (error: ServiceError| null) => void) { + const sendInner = (count: number, callback: (error: ServiceError| null) => void) => { + if (count === 0) { + callback(null); + return; + } + this.sendOneCall(error => { + if (error) { + callback(error); + return; + } + sendInner(count-1, callback); + }); + } + sendInner(count, callback); + } + + getConnectivityState() { + return this.client.getChannel().getConnectivityState(false); + } +} diff --git a/packages/grpc-js-xds/test/framework.ts b/packages/grpc-js-xds/test/framework.ts new file mode 100644 index 000000000..bd6f270d6 --- /dev/null +++ b/packages/grpc-js-xds/test/framework.ts @@ -0,0 +1,361 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
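The new client.ts above wraps an EchoTestService client whose channel points at an xds: target and receives an in-memory bootstrap config through the test-only channel option. A rough usage sketch; xdsServer is assumed to be an XdsServer fixture from ./xds-server created elsewhere in the test, and the 'my-service' target name is arbitrary:

import { XdsTestClient } from './client';

// Build a client for xds:///my-service from the fake control plane's bootstrap info.
const client = XdsTestClient.createFromServer('my-service', xdsServer);
// Fire an echo call every 10ms while the test asserts on backend traffic...
client.startCalls(10);
// ...then send a bounded batch when a deterministic call count is needed.
client.sendNCalls(5, error => {
  client.close();
});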
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import { ClusterLoadAssignment } from "../src/generated/envoy/config/endpoint/v3/ClusterLoadAssignment"; +import { Cluster } from "../src/generated/envoy/config/cluster/v3/Cluster"; +import { Backend } from "./backend"; +import { Locality } from "../src/generated/envoy/config/core/v3/Locality"; +import { RouteConfiguration } from "../src/generated/envoy/config/route/v3/RouteConfiguration"; +import { Route } from "../src/generated/envoy/config/route/v3/Route"; +import { Listener } from "../src/generated/envoy/config/listener/v3/Listener"; +import { HttpConnectionManager } from "../src/generated/envoy/extensions/filters/network/http_connection_manager/v3/HttpConnectionManager"; +import { AnyExtension } from "@grpc/proto-loader"; +import { CLUSTER_CONFIG_TYPE_URL, HTTP_CONNECTION_MANGER_TYPE_URL } from "../src/resources"; +import { LocalityLbEndpoints } from "../src/generated/envoy/config/endpoint/v3/LocalityLbEndpoints"; +import { LbEndpoint } from "../src/generated/envoy/config/endpoint/v3/LbEndpoint"; +import { ClusterConfig } from "../src/generated/envoy/extensions/clusters/aggregate/v3/ClusterConfig"; +import { Any } from "../src/generated/google/protobuf/Any"; + +interface Endpoint { + locality: Locality; + backends: Backend[]; + weight?: number; + priority?: number; +} + +function getLbEndpoint(backend: Backend): LbEndpoint { + return { + health_status: "HEALTHY", + endpoint: { + address: { + socket_address: { + address: '::1', + port_value: backend.getPort() + } + } + } + }; +} + +function getLocalityLbEndpoints(endpoint: Endpoint): LocalityLbEndpoints { + return { + lb_endpoints: endpoint.backends.map(getLbEndpoint), + locality: endpoint.locality, + load_balancing_weight: {value: endpoint.weight ?? 1}, + priority: endpoint.priority ?? 
0 + } +} + +export interface FakeCluster { + getClusterConfig(): Cluster; + getAllClusterConfigs(): Cluster[]; + getName(): string; + startAllBackends(): Promise; + haveAllBackendsReceivedTraffic(): boolean; + waitForAllBackendsToReceiveTraffic(): Promise; +} + +export class FakeEdsCluster implements FakeCluster { + constructor(private clusterName: string, private endpointName: string, private endpoints: Endpoint[], private loadBalancingPolicyOverride?: Any | 'RING_HASH') {} + + getEndpointConfig(): ClusterLoadAssignment { + return { + cluster_name: this.endpointName, + endpoints: this.endpoints.map(getLocalityLbEndpoints) + }; + } + + getClusterConfig(): Cluster { + const result: Cluster = { + name: this.clusterName, + type: 'EDS', + eds_cluster_config: {eds_config: {ads: {}}, service_name: this.endpointName}, + lrs_server: {self: {}}, + circuit_breakers: { + thresholds: [ + { + priority: 'DEFAULT', + max_requests: {value: 1000} + } + ] + } + }; + if (this.loadBalancingPolicyOverride === 'RING_HASH') { + result.lb_policy = 'RING_HASH'; + } else if (this.loadBalancingPolicyOverride) { + result.load_balancing_policy = { + policies: [ + { + typed_extension_config: { + 'name': 'test', + typed_config: this.loadBalancingPolicyOverride + } + } + ] + } + } else { + result.lb_policy = 'ROUND_ROBIN'; + } + return result; + } + + getAllClusterConfigs(): Cluster[] { + return [this.getClusterConfig()]; + } + + getName() { + return this.clusterName; + } + + startAllBackends(): Promise { + return Promise.all(this.endpoints.map(endpoint => Promise.all(endpoint.backends.map(backend => backend.startAsync())))); + } + + haveAllBackendsReceivedTraffic(): boolean { + for (const endpoint of this.endpoints) { + for (const backend of endpoint.backends) { + if (backend.getCallCount() < 1) { + return false; + } + } + } + return true; + } + + waitForAllBackendsToReceiveTraffic(): Promise { + for (const endpoint of this.endpoints) { + for (const backend of endpoint.backends) { + backend.resetCallCount(); + } + } + return new Promise((resolve, reject) => { + let finishedPromise = false; + for (const endpoint of this.endpoints) { + for (const backend of endpoint.backends) { + backend.onCall(() => { + if (finishedPromise) { + return; + } + if (this.haveAllBackendsReceivedTraffic()) { + finishedPromise = true; + resolve(); + } + }); + } + } + }); + } +} + +export class FakeDnsCluster implements FakeCluster { + constructor(private name: string, private backend: Backend) {} + + getClusterConfig(): Cluster { + return { + name: this.name, + type: 'LOGICAL_DNS', + lb_policy: 'ROUND_ROBIN', + load_assignment: { + endpoints: [{ + lb_endpoints: [{ + endpoint: { + address: { + socket_address: { + address: 'localhost', + port_value: this.backend.getPort() + } + } + } + }] + }] + }, + lrs_server: {self: {}} + }; + } + getAllClusterConfigs(): Cluster[] { + return [this.getClusterConfig()]; + } + getName(): string { + return this.name; + } + startAllBackends(): Promise { + return this.backend.startAsync(); + } + haveAllBackendsReceivedTraffic(): boolean { + return this.backend.getCallCount() > 0; + } + waitForAllBackendsToReceiveTraffic(): Promise { + return new Promise((resolve, reject) => { + this.backend.onCall(resolve); + }); + } +} + +export class FakeAggregateCluster implements FakeCluster { + constructor(private name: string, private children: FakeCluster[]) {} + + getClusterConfig(): Cluster { + const clusterConfig: ClusterConfig & AnyExtension = { + '@type': CLUSTER_CONFIG_TYPE_URL, + clusters: this.children.map(child => 
child.getName()) + }; + return { + name: this.name, + lb_policy: 'ROUND_ROBIN', + cluster_type: { + typed_config: clusterConfig + } + } + } + getAllClusterConfigs(): Cluster[] { + const allConfigs = [this.getClusterConfig()]; + for (const child of this.children) { + allConfigs.push(...child.getAllClusterConfigs()); + } + return allConfigs; + } + getName(): string { + return this.name; + } + startAllBackends(): Promise { + return Promise.all(this.children.map(child => child.startAllBackends())); + } + haveAllBackendsReceivedTraffic(): boolean { + for (const child of this.children) { + if (!child.haveAllBackendsReceivedTraffic()) { + return false; + } + } + return true; + } + waitForAllBackendsToReceiveTraffic(): Promise { + return Promise.all(this.children.map(child => child.waitForAllBackendsToReceiveTraffic())).then(() => {}); + } +} + +interface FakeRoute { + cluster?: FakeCluster; + weightedClusters?: [{cluster: FakeCluster, weight: number}]; +} + +function createRouteConfig(route: FakeRoute): Route { + if (route.cluster) { + return { + match: { + prefix: '' + }, + route: { + cluster: route.cluster.getName(), + // Default to consistent hash + hash_policy: [{ + filter_state: { + key: 'io.grpc.channel_id' + } + }] + }, + }; + } else { + return { + match: { + prefix: '' + }, + route: { + weighted_clusters: { + clusters: route.weightedClusters!.map(clusterWeight => ({ + name: clusterWeight.cluster.getName(), + weight: {value: clusterWeight.weight} + })) + }, + // Default to consistent hash + hash_policy: [{ + filter_state: { + key: 'io.grpc.channel_id' + } + }] + } + } + } +} + +export class FakeRouteGroup { + constructor(private listenerName: string, private routeName: string, private routes: FakeRoute[]) {} + + getRouteConfiguration(): RouteConfiguration { + return { + name: this.routeName, + virtual_hosts: [{ + domains: ['*'], + routes: this.routes.map(createRouteConfig) + }] + }; + } + + getListener(): Listener { + const httpConnectionManager: HttpConnectionManager & AnyExtension = { + '@type': HTTP_CONNECTION_MANGER_TYPE_URL, + rds: { + route_config_name: this.routeName, + config_source: {ads: {}} + } + } + return { + name: this.listenerName, + api_listener: { + api_listener: httpConnectionManager + } + }; + } + + startAllBackends(): Promise { + return Promise.all(this.routes.map(route => { + if (route.cluster) { + return route.cluster.startAllBackends(); + } else if (route.weightedClusters) { + return Promise.all(route.weightedClusters.map(clusterWeight => clusterWeight.cluster.startAllBackends())); + } else { + return Promise.resolve(); + } + })); + } + + haveAllBackendsReceivedTraffic(): boolean { + for (const route of this.routes) { + if (route.cluster) { + return route.cluster.haveAllBackendsReceivedTraffic(); + } else if (route.weightedClusters) { + for (const weightedCluster of route.weightedClusters) { + if (!weightedCluster.cluster.haveAllBackendsReceivedTraffic()) { + return false; + } + } + } + } + return true; + } + + waitForAllBackendsToReceiveTraffic(): Promise { + return Promise.all(this.routes.map(route => { + if (route.cluster) { + return route.cluster.waitForAllBackendsToReceiveTraffic(); + } else if (route.weightedClusters) { + return Promise.all(route.weightedClusters.map(clusterWeight => clusterWeight.cluster.waitForAllBackendsToReceiveTraffic())).then(() => {}); + } else { + return Promise.resolve(); + } + })); + } +} diff --git a/packages/grpc-js-xds/test/generated/echo.ts b/packages/grpc-js-xds/test/generated/echo.ts new file mode 100644 index 
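The new framework.ts above builds the xDS resource chain (Listener, RouteConfiguration, Cluster, ClusterLoadAssignment) that the fake control plane serves, starting from plain Backend instances. A rough end-to-end sketch, assumed to run inside an async test body; the resource names and the single-region locality shape are arbitrary choices consistent with the Endpoint interface shown above:

import { Backend } from './backend';
import { FakeEdsCluster, FakeRouteGroup } from './framework';

// One backend, one EDS cluster, one catch-all route.
const backend = new Backend();
const cluster = new FakeEdsCluster('cluster1', 'endpoint1', [
  {locality: {region: 'region1'}, backends: [backend]}
]);
const routeGroup = new FakeRouteGroup('listener1', 'route1', [{cluster}]);
await routeGroup.startAllBackends();
// These are the resources a test then hands to the fake xDS control plane:
//   routeGroup.getListener(), routeGroup.getRouteConfiguration(),
//   cluster.getAllClusterConfigs(), cluster.getEndpointConfig()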
000000000..537a49cfa --- /dev/null +++ b/packages/grpc-js-xds/test/generated/echo.ts @@ -0,0 +1,46 @@ +import type * as grpc from '@grpc/grpc-js'; +import type { MessageTypeDefinition } from '@grpc/proto-loader'; + +import type { EchoTest1ServiceClient as _grpc_testing_EchoTest1ServiceClient, EchoTest1ServiceDefinition as _grpc_testing_EchoTest1ServiceDefinition } from './grpc/testing/EchoTest1Service'; +import type { EchoTest2ServiceClient as _grpc_testing_EchoTest2ServiceClient, EchoTest2ServiceDefinition as _grpc_testing_EchoTest2ServiceDefinition } from './grpc/testing/EchoTest2Service'; +import type { EchoTestServiceClient as _grpc_testing_EchoTestServiceClient, EchoTestServiceDefinition as _grpc_testing_EchoTestServiceDefinition } from './grpc/testing/EchoTestService'; +import type { NoRpcServiceClient as _grpc_testing_NoRpcServiceClient, NoRpcServiceDefinition as _grpc_testing_NoRpcServiceDefinition } from './grpc/testing/NoRpcService'; +import type { UnimplementedEchoServiceClient as _grpc_testing_UnimplementedEchoServiceClient, UnimplementedEchoServiceDefinition as _grpc_testing_UnimplementedEchoServiceDefinition } from './grpc/testing/UnimplementedEchoService'; + +type SubtypeConstructor any, Subtype> = { + new(...args: ConstructorParameters): Subtype; +}; + +export interface ProtoGrpcType { + grpc: { + testing: { + DebugInfo: MessageTypeDefinition + EchoRequest: MessageTypeDefinition + EchoResponse: MessageTypeDefinition + EchoTest1Service: SubtypeConstructor & { service: _grpc_testing_EchoTest1ServiceDefinition } + EchoTest2Service: SubtypeConstructor & { service: _grpc_testing_EchoTest2ServiceDefinition } + EchoTestService: SubtypeConstructor & { service: _grpc_testing_EchoTestServiceDefinition } + ErrorStatus: MessageTypeDefinition + /** + * A service without any rpc defined to test coverage. + */ + NoRpcService: SubtypeConstructor & { service: _grpc_testing_NoRpcServiceDefinition } + RequestParams: MessageTypeDefinition + ResponseParams: MessageTypeDefinition + SimpleRequest: MessageTypeDefinition + SimpleResponse: MessageTypeDefinition + StringValue: MessageTypeDefinition + UnimplementedEchoService: SubtypeConstructor & { service: _grpc_testing_UnimplementedEchoServiceDefinition } + } + } + xds: { + data: { + orca: { + v3: { + OrcaLoadReport: MessageTypeDefinition + } + } + } + } +} + diff --git a/packages/grpc-js-xds/test/generated/grpc/testing/DebugInfo.ts b/packages/grpc-js-xds/test/generated/grpc/testing/DebugInfo.ts new file mode 100644 index 000000000..123188fe3 --- /dev/null +++ b/packages/grpc-js-xds/test/generated/grpc/testing/DebugInfo.ts @@ -0,0 +1,18 @@ +// Original file: proto/grpc/testing/echo_messages.proto + + +/** + * Message to be echoed back serialized in trailer. + */ +export interface DebugInfo { + 'stack_entries'?: (string)[]; + 'detail'?: (string); +} + +/** + * Message to be echoed back serialized in trailer. 
+ */ +export interface DebugInfo__Output { + 'stack_entries': (string)[]; + 'detail': (string); +} diff --git a/packages/grpc-js-xds/test/generated/grpc/testing/EchoRequest.ts b/packages/grpc-js-xds/test/generated/grpc/testing/EchoRequest.ts new file mode 100644 index 000000000..cadf04f7a --- /dev/null +++ b/packages/grpc-js-xds/test/generated/grpc/testing/EchoRequest.ts @@ -0,0 +1,13 @@ +// Original file: proto/grpc/testing/echo_messages.proto + +import type { RequestParams as _grpc_testing_RequestParams, RequestParams__Output as _grpc_testing_RequestParams__Output } from '../../grpc/testing/RequestParams'; + +export interface EchoRequest { + 'message'?: (string); + 'param'?: (_grpc_testing_RequestParams | null); +} + +export interface EchoRequest__Output { + 'message': (string); + 'param': (_grpc_testing_RequestParams__Output | null); +} diff --git a/packages/grpc-js-xds/test/generated/grpc/testing/EchoResponse.ts b/packages/grpc-js-xds/test/generated/grpc/testing/EchoResponse.ts new file mode 100644 index 000000000..d54beaf4f --- /dev/null +++ b/packages/grpc-js-xds/test/generated/grpc/testing/EchoResponse.ts @@ -0,0 +1,13 @@ +// Original file: proto/grpc/testing/echo_messages.proto + +import type { ResponseParams as _grpc_testing_ResponseParams, ResponseParams__Output as _grpc_testing_ResponseParams__Output } from '../../grpc/testing/ResponseParams'; + +export interface EchoResponse { + 'message'?: (string); + 'param'?: (_grpc_testing_ResponseParams | null); +} + +export interface EchoResponse__Output { + 'message': (string); + 'param': (_grpc_testing_ResponseParams__Output | null); +} diff --git a/packages/grpc-js-xds/test/generated/grpc/testing/EchoTest1Service.ts b/packages/grpc-js-xds/test/generated/grpc/testing/EchoTest1Service.ts new file mode 100644 index 000000000..a2b1947f6 --- /dev/null +++ b/packages/grpc-js-xds/test/generated/grpc/testing/EchoTest1Service.ts @@ -0,0 +1,117 @@ +// Original file: proto/grpc/testing/echo.proto + +import type * as grpc from '@grpc/grpc-js' +import type { MethodDefinition } from '@grpc/proto-loader' +import type { EchoRequest as _grpc_testing_EchoRequest, EchoRequest__Output as _grpc_testing_EchoRequest__Output } from '../../grpc/testing/EchoRequest'; +import type { EchoResponse as _grpc_testing_EchoResponse, EchoResponse__Output as _grpc_testing_EchoResponse__Output } from '../../grpc/testing/EchoResponse'; +import type { SimpleRequest as _grpc_testing_SimpleRequest, SimpleRequest__Output as _grpc_testing_SimpleRequest__Output } from '../../grpc/testing/SimpleRequest'; +import type { SimpleResponse as _grpc_testing_SimpleResponse, SimpleResponse__Output as _grpc_testing_SimpleResponse__Output } from '../../grpc/testing/SimpleResponse'; + +export interface EchoTest1ServiceClient extends grpc.Client { + BidiStream(metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientDuplexStream<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse__Output>; + BidiStream(options?: grpc.CallOptions): grpc.ClientDuplexStream<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse__Output>; + bidiStream(metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientDuplexStream<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse__Output>; + bidiStream(options?: grpc.CallOptions): grpc.ClientDuplexStream<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse__Output>; + + /** + * A service which checks that the initial metadata sent over contains some + * expected key value pair + */ + CheckClientInitialMetadata(argument: _grpc_testing_SimpleRequest, 
metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + CheckClientInitialMetadata(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + CheckClientInitialMetadata(argument: _grpc_testing_SimpleRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + CheckClientInitialMetadata(argument: _grpc_testing_SimpleRequest, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + /** + * A service which checks that the initial metadata sent over contains some + * expected key value pair + */ + checkClientInitialMetadata(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + checkClientInitialMetadata(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + checkClientInitialMetadata(argument: _grpc_testing_SimpleRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + checkClientInitialMetadata(argument: _grpc_testing_SimpleRequest, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + + Echo(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Echo(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Echo(argument: _grpc_testing_EchoRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Echo(argument: _grpc_testing_EchoRequest, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo(argument: _grpc_testing_EchoRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo(argument: _grpc_testing_EchoRequest, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + + Echo1(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Echo1(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Echo1(argument: _grpc_testing_EchoRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Echo1(argument: _grpc_testing_EchoRequest, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo1(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, 
options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo1(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo1(argument: _grpc_testing_EchoRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo1(argument: _grpc_testing_EchoRequest, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + + Echo2(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Echo2(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Echo2(argument: _grpc_testing_EchoRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Echo2(argument: _grpc_testing_EchoRequest, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo2(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo2(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo2(argument: _grpc_testing_EchoRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo2(argument: _grpc_testing_EchoRequest, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + + RequestStream(metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientWritableStream<_grpc_testing_EchoRequest>; + RequestStream(metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientWritableStream<_grpc_testing_EchoRequest>; + RequestStream(options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientWritableStream<_grpc_testing_EchoRequest>; + RequestStream(callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientWritableStream<_grpc_testing_EchoRequest>; + requestStream(metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientWritableStream<_grpc_testing_EchoRequest>; + requestStream(metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientWritableStream<_grpc_testing_EchoRequest>; + requestStream(options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientWritableStream<_grpc_testing_EchoRequest>; + requestStream(callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientWritableStream<_grpc_testing_EchoRequest>; + + ResponseStream(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientReadableStream<_grpc_testing_EchoResponse__Output>; + ResponseStream(argument: _grpc_testing_EchoRequest, options?: grpc.CallOptions): grpc.ClientReadableStream<_grpc_testing_EchoResponse__Output>; + responseStream(argument: 
_grpc_testing_EchoRequest, metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientReadableStream<_grpc_testing_EchoResponse__Output>; + responseStream(argument: _grpc_testing_EchoRequest, options?: grpc.CallOptions): grpc.ClientReadableStream<_grpc_testing_EchoResponse__Output>; + + Unimplemented(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Unimplemented(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Unimplemented(argument: _grpc_testing_EchoRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Unimplemented(argument: _grpc_testing_EchoRequest, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + unimplemented(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + unimplemented(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + unimplemented(argument: _grpc_testing_EchoRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + unimplemented(argument: _grpc_testing_EchoRequest, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + +} + +export interface EchoTest1ServiceHandlers extends grpc.UntypedServiceImplementation { + BidiStream: grpc.handleBidiStreamingCall<_grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse>; + + /** + * A service which checks that the initial metadata sent over contains some + * expected key value pair + */ + CheckClientInitialMetadata: grpc.handleUnaryCall<_grpc_testing_SimpleRequest__Output, _grpc_testing_SimpleResponse>; + + Echo: grpc.handleUnaryCall<_grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse>; + + Echo1: grpc.handleUnaryCall<_grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse>; + + Echo2: grpc.handleUnaryCall<_grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse>; + + RequestStream: grpc.handleClientStreamingCall<_grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse>; + + ResponseStream: grpc.handleServerStreamingCall<_grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse>; + + Unimplemented: grpc.handleUnaryCall<_grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse>; + +} + +export interface EchoTest1ServiceDefinition extends grpc.ServiceDefinition { + BidiStream: MethodDefinition<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse, _grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse__Output> + CheckClientInitialMetadata: MethodDefinition<_grpc_testing_SimpleRequest, _grpc_testing_SimpleResponse, _grpc_testing_SimpleRequest__Output, _grpc_testing_SimpleResponse__Output> + Echo: MethodDefinition<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse, _grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse__Output> + Echo1: MethodDefinition<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse, _grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse__Output> + Echo2: MethodDefinition<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse, _grpc_testing_EchoRequest__Output, 
_grpc_testing_EchoResponse__Output> + RequestStream: MethodDefinition<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse, _grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse__Output> + ResponseStream: MethodDefinition<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse, _grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse__Output> + Unimplemented: MethodDefinition<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse, _grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse__Output> +} diff --git a/packages/grpc-js-xds/test/generated/grpc/testing/EchoTest2Service.ts b/packages/grpc-js-xds/test/generated/grpc/testing/EchoTest2Service.ts new file mode 100644 index 000000000..033e70143 --- /dev/null +++ b/packages/grpc-js-xds/test/generated/grpc/testing/EchoTest2Service.ts @@ -0,0 +1,117 @@ +// Original file: proto/grpc/testing/echo.proto + +import type * as grpc from '@grpc/grpc-js' +import type { MethodDefinition } from '@grpc/proto-loader' +import type { EchoRequest as _grpc_testing_EchoRequest, EchoRequest__Output as _grpc_testing_EchoRequest__Output } from '../../grpc/testing/EchoRequest'; +import type { EchoResponse as _grpc_testing_EchoResponse, EchoResponse__Output as _grpc_testing_EchoResponse__Output } from '../../grpc/testing/EchoResponse'; +import type { SimpleRequest as _grpc_testing_SimpleRequest, SimpleRequest__Output as _grpc_testing_SimpleRequest__Output } from '../../grpc/testing/SimpleRequest'; +import type { SimpleResponse as _grpc_testing_SimpleResponse, SimpleResponse__Output as _grpc_testing_SimpleResponse__Output } from '../../grpc/testing/SimpleResponse'; + +export interface EchoTest2ServiceClient extends grpc.Client { + BidiStream(metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientDuplexStream<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse__Output>; + BidiStream(options?: grpc.CallOptions): grpc.ClientDuplexStream<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse__Output>; + bidiStream(metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientDuplexStream<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse__Output>; + bidiStream(options?: grpc.CallOptions): grpc.ClientDuplexStream<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse__Output>; + + /** + * A service which checks that the initial metadata sent over contains some + * expected key value pair + */ + CheckClientInitialMetadata(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + CheckClientInitialMetadata(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + CheckClientInitialMetadata(argument: _grpc_testing_SimpleRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + CheckClientInitialMetadata(argument: _grpc_testing_SimpleRequest, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + /** + * A service which checks that the initial metadata sent over contains some + * expected key value pair + */ + checkClientInitialMetadata(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + checkClientInitialMetadata(argument: _grpc_testing_SimpleRequest, metadata: 
grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + checkClientInitialMetadata(argument: _grpc_testing_SimpleRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + checkClientInitialMetadata(argument: _grpc_testing_SimpleRequest, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + + Echo(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Echo(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Echo(argument: _grpc_testing_EchoRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Echo(argument: _grpc_testing_EchoRequest, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo(argument: _grpc_testing_EchoRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo(argument: _grpc_testing_EchoRequest, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + + Echo1(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Echo1(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Echo1(argument: _grpc_testing_EchoRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Echo1(argument: _grpc_testing_EchoRequest, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo1(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo1(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo1(argument: _grpc_testing_EchoRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo1(argument: _grpc_testing_EchoRequest, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + + Echo2(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Echo2(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Echo2(argument: _grpc_testing_EchoRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): 
grpc.ClientUnaryCall; + Echo2(argument: _grpc_testing_EchoRequest, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo2(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo2(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo2(argument: _grpc_testing_EchoRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo2(argument: _grpc_testing_EchoRequest, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + + RequestStream(metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientWritableStream<_grpc_testing_EchoRequest>; + RequestStream(metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientWritableStream<_grpc_testing_EchoRequest>; + RequestStream(options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientWritableStream<_grpc_testing_EchoRequest>; + RequestStream(callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientWritableStream<_grpc_testing_EchoRequest>; + requestStream(metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientWritableStream<_grpc_testing_EchoRequest>; + requestStream(metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientWritableStream<_grpc_testing_EchoRequest>; + requestStream(options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientWritableStream<_grpc_testing_EchoRequest>; + requestStream(callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientWritableStream<_grpc_testing_EchoRequest>; + + ResponseStream(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientReadableStream<_grpc_testing_EchoResponse__Output>; + ResponseStream(argument: _grpc_testing_EchoRequest, options?: grpc.CallOptions): grpc.ClientReadableStream<_grpc_testing_EchoResponse__Output>; + responseStream(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientReadableStream<_grpc_testing_EchoResponse__Output>; + responseStream(argument: _grpc_testing_EchoRequest, options?: grpc.CallOptions): grpc.ClientReadableStream<_grpc_testing_EchoResponse__Output>; + + Unimplemented(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Unimplemented(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Unimplemented(argument: _grpc_testing_EchoRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Unimplemented(argument: _grpc_testing_EchoRequest, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + unimplemented(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, options: grpc.CallOptions, 
callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + unimplemented(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + unimplemented(argument: _grpc_testing_EchoRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + unimplemented(argument: _grpc_testing_EchoRequest, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + +} + +export interface EchoTest2ServiceHandlers extends grpc.UntypedServiceImplementation { + BidiStream: grpc.handleBidiStreamingCall<_grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse>; + + /** + * A service which checks that the initial metadata sent over contains some + * expected key value pair + */ + CheckClientInitialMetadata: grpc.handleUnaryCall<_grpc_testing_SimpleRequest__Output, _grpc_testing_SimpleResponse>; + + Echo: grpc.handleUnaryCall<_grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse>; + + Echo1: grpc.handleUnaryCall<_grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse>; + + Echo2: grpc.handleUnaryCall<_grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse>; + + RequestStream: grpc.handleClientStreamingCall<_grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse>; + + ResponseStream: grpc.handleServerStreamingCall<_grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse>; + + Unimplemented: grpc.handleUnaryCall<_grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse>; + +} + +export interface EchoTest2ServiceDefinition extends grpc.ServiceDefinition { + BidiStream: MethodDefinition<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse, _grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse__Output> + CheckClientInitialMetadata: MethodDefinition<_grpc_testing_SimpleRequest, _grpc_testing_SimpleResponse, _grpc_testing_SimpleRequest__Output, _grpc_testing_SimpleResponse__Output> + Echo: MethodDefinition<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse, _grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse__Output> + Echo1: MethodDefinition<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse, _grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse__Output> + Echo2: MethodDefinition<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse, _grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse__Output> + RequestStream: MethodDefinition<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse, _grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse__Output> + ResponseStream: MethodDefinition<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse, _grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse__Output> + Unimplemented: MethodDefinition<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse, _grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse__Output> +} diff --git a/packages/grpc-js-xds/test/generated/grpc/testing/EchoTestService.ts b/packages/grpc-js-xds/test/generated/grpc/testing/EchoTestService.ts new file mode 100644 index 000000000..d1fa2d075 --- /dev/null +++ b/packages/grpc-js-xds/test/generated/grpc/testing/EchoTestService.ts @@ -0,0 +1,150 @@ +// Original file: proto/grpc/testing/echo.proto + +import type * as grpc from '@grpc/grpc-js' +import type { MethodDefinition } from '@grpc/proto-loader' +import type { EchoRequest as _grpc_testing_EchoRequest, EchoRequest__Output as 
_grpc_testing_EchoRequest__Output } from '../../grpc/testing/EchoRequest'; +import type { EchoResponse as _grpc_testing_EchoResponse, EchoResponse__Output as _grpc_testing_EchoResponse__Output } from '../../grpc/testing/EchoResponse'; +import type { SimpleRequest as _grpc_testing_SimpleRequest, SimpleRequest__Output as _grpc_testing_SimpleRequest__Output } from '../../grpc/testing/SimpleRequest'; +import type { SimpleResponse as _grpc_testing_SimpleResponse, SimpleResponse__Output as _grpc_testing_SimpleResponse__Output } from '../../grpc/testing/SimpleResponse'; +import type { StringValue as _grpc_testing_StringValue, StringValue__Output as _grpc_testing_StringValue__Output } from '../../grpc/testing/StringValue'; + +export interface EchoTestServiceClient extends grpc.Client { + BidiStream(metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientDuplexStream<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse__Output>; + BidiStream(options?: grpc.CallOptions): grpc.ClientDuplexStream<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse__Output>; + bidiStream(metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientDuplexStream<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse__Output>; + bidiStream(options?: grpc.CallOptions): grpc.ClientDuplexStream<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse__Output>; + + /** + * A service which checks that the initial metadata sent over contains some + * expected key value pair + */ + CheckClientInitialMetadata(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + CheckClientInitialMetadata(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + CheckClientInitialMetadata(argument: _grpc_testing_SimpleRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + CheckClientInitialMetadata(argument: _grpc_testing_SimpleRequest, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + /** + * A service which checks that the initial metadata sent over contains some + * expected key value pair + */ + checkClientInitialMetadata(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + checkClientInitialMetadata(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + checkClientInitialMetadata(argument: _grpc_testing_SimpleRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + checkClientInitialMetadata(argument: _grpc_testing_SimpleRequest, callback: grpc.requestCallback<_grpc_testing_SimpleResponse__Output>): grpc.ClientUnaryCall; + + CheckDeadlineSet(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_StringValue__Output>): grpc.ClientUnaryCall; + CheckDeadlineSet(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_StringValue__Output>): grpc.ClientUnaryCall; + CheckDeadlineSet(argument: _grpc_testing_SimpleRequest, options: grpc.CallOptions, 
callback: grpc.requestCallback<_grpc_testing_StringValue__Output>): grpc.ClientUnaryCall; + CheckDeadlineSet(argument: _grpc_testing_SimpleRequest, callback: grpc.requestCallback<_grpc_testing_StringValue__Output>): grpc.ClientUnaryCall; + checkDeadlineSet(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_StringValue__Output>): grpc.ClientUnaryCall; + checkDeadlineSet(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_StringValue__Output>): grpc.ClientUnaryCall; + checkDeadlineSet(argument: _grpc_testing_SimpleRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_StringValue__Output>): grpc.ClientUnaryCall; + checkDeadlineSet(argument: _grpc_testing_SimpleRequest, callback: grpc.requestCallback<_grpc_testing_StringValue__Output>): grpc.ClientUnaryCall; + + CheckDeadlineUpperBound(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_StringValue__Output>): grpc.ClientUnaryCall; + CheckDeadlineUpperBound(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_StringValue__Output>): grpc.ClientUnaryCall; + CheckDeadlineUpperBound(argument: _grpc_testing_SimpleRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_StringValue__Output>): grpc.ClientUnaryCall; + CheckDeadlineUpperBound(argument: _grpc_testing_SimpleRequest, callback: grpc.requestCallback<_grpc_testing_StringValue__Output>): grpc.ClientUnaryCall; + checkDeadlineUpperBound(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_StringValue__Output>): grpc.ClientUnaryCall; + checkDeadlineUpperBound(argument: _grpc_testing_SimpleRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_StringValue__Output>): grpc.ClientUnaryCall; + checkDeadlineUpperBound(argument: _grpc_testing_SimpleRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_StringValue__Output>): grpc.ClientUnaryCall; + checkDeadlineUpperBound(argument: _grpc_testing_SimpleRequest, callback: grpc.requestCallback<_grpc_testing_StringValue__Output>): grpc.ClientUnaryCall; + + Echo(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Echo(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Echo(argument: _grpc_testing_EchoRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Echo(argument: _grpc_testing_EchoRequest, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo(argument: _grpc_testing_EchoRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + 
echo(argument: _grpc_testing_EchoRequest, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + + Echo1(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Echo1(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Echo1(argument: _grpc_testing_EchoRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Echo1(argument: _grpc_testing_EchoRequest, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo1(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo1(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo1(argument: _grpc_testing_EchoRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo1(argument: _grpc_testing_EchoRequest, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + + Echo2(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Echo2(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Echo2(argument: _grpc_testing_EchoRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Echo2(argument: _grpc_testing_EchoRequest, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo2(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo2(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo2(argument: _grpc_testing_EchoRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + echo2(argument: _grpc_testing_EchoRequest, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + + RequestStream(metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientWritableStream<_grpc_testing_EchoRequest>; + RequestStream(metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientWritableStream<_grpc_testing_EchoRequest>; + RequestStream(options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientWritableStream<_grpc_testing_EchoRequest>; + RequestStream(callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientWritableStream<_grpc_testing_EchoRequest>; + requestStream(metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): 
grpc.ClientWritableStream<_grpc_testing_EchoRequest>; + requestStream(metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientWritableStream<_grpc_testing_EchoRequest>; + requestStream(options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientWritableStream<_grpc_testing_EchoRequest>; + requestStream(callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientWritableStream<_grpc_testing_EchoRequest>; + + ResponseStream(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientReadableStream<_grpc_testing_EchoResponse__Output>; + ResponseStream(argument: _grpc_testing_EchoRequest, options?: grpc.CallOptions): grpc.ClientReadableStream<_grpc_testing_EchoResponse__Output>; + responseStream(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientReadableStream<_grpc_testing_EchoResponse__Output>; + responseStream(argument: _grpc_testing_EchoRequest, options?: grpc.CallOptions): grpc.ClientReadableStream<_grpc_testing_EchoResponse__Output>; + + Unimplemented(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Unimplemented(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Unimplemented(argument: _grpc_testing_EchoRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Unimplemented(argument: _grpc_testing_EchoRequest, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + unimplemented(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + unimplemented(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + unimplemented(argument: _grpc_testing_EchoRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + unimplemented(argument: _grpc_testing_EchoRequest, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + + UnimplementedBidi(metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientDuplexStream<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse__Output>; + UnimplementedBidi(options?: grpc.CallOptions): grpc.ClientDuplexStream<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse__Output>; + unimplementedBidi(metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientDuplexStream<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse__Output>; + unimplementedBidi(options?: grpc.CallOptions): grpc.ClientDuplexStream<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse__Output>; + +} + +export interface EchoTestServiceHandlers extends grpc.UntypedServiceImplementation { + BidiStream: grpc.handleBidiStreamingCall<_grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse>; + + /** + * A service which checks that the initial metadata sent over contains some + * expected key value pair + */ + CheckClientInitialMetadata: grpc.handleUnaryCall<_grpc_testing_SimpleRequest__Output, 
_grpc_testing_SimpleResponse>; + + CheckDeadlineSet: grpc.handleUnaryCall<_grpc_testing_SimpleRequest__Output, _grpc_testing_StringValue>; + + CheckDeadlineUpperBound: grpc.handleUnaryCall<_grpc_testing_SimpleRequest__Output, _grpc_testing_StringValue>; + + Echo: grpc.handleUnaryCall<_grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse>; + + Echo1: grpc.handleUnaryCall<_grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse>; + + Echo2: grpc.handleUnaryCall<_grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse>; + + RequestStream: grpc.handleClientStreamingCall<_grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse>; + + ResponseStream: grpc.handleServerStreamingCall<_grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse>; + + Unimplemented: grpc.handleUnaryCall<_grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse>; + + UnimplementedBidi: grpc.handleBidiStreamingCall<_grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse>; + +} + +export interface EchoTestServiceDefinition extends grpc.ServiceDefinition { + BidiStream: MethodDefinition<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse, _grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse__Output> + CheckClientInitialMetadata: MethodDefinition<_grpc_testing_SimpleRequest, _grpc_testing_SimpleResponse, _grpc_testing_SimpleRequest__Output, _grpc_testing_SimpleResponse__Output> + CheckDeadlineSet: MethodDefinition<_grpc_testing_SimpleRequest, _grpc_testing_StringValue, _grpc_testing_SimpleRequest__Output, _grpc_testing_StringValue__Output> + CheckDeadlineUpperBound: MethodDefinition<_grpc_testing_SimpleRequest, _grpc_testing_StringValue, _grpc_testing_SimpleRequest__Output, _grpc_testing_StringValue__Output> + Echo: MethodDefinition<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse, _grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse__Output> + Echo1: MethodDefinition<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse, _grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse__Output> + Echo2: MethodDefinition<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse, _grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse__Output> + RequestStream: MethodDefinition<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse, _grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse__Output> + ResponseStream: MethodDefinition<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse, _grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse__Output> + Unimplemented: MethodDefinition<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse, _grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse__Output> + UnimplementedBidi: MethodDefinition<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse, _grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse__Output> +} diff --git a/packages/grpc-js-xds/test/generated/grpc/testing/ErrorStatus.ts b/packages/grpc-js-xds/test/generated/grpc/testing/ErrorStatus.ts new file mode 100644 index 000000000..42ff36d9a --- /dev/null +++ b/packages/grpc-js-xds/test/generated/grpc/testing/ErrorStatus.ts @@ -0,0 +1,20 @@ +// Original file: proto/grpc/testing/echo_messages.proto + + +/** + * Error status client expects to see. + */ +export interface ErrorStatus { + 'code'?: (number); + 'error_message'?: (string); + 'binary_error_details'?: (string); +} + +/** + * Error status client expects to see. 
+ */ +export interface ErrorStatus__Output { + 'code': (number); + 'error_message': (string); + 'binary_error_details': (string); +} diff --git a/packages/grpc-js-xds/test/generated/grpc/testing/NoRpcService.ts b/packages/grpc-js-xds/test/generated/grpc/testing/NoRpcService.ts new file mode 100644 index 000000000..7427c8097 --- /dev/null +++ b/packages/grpc-js-xds/test/generated/grpc/testing/NoRpcService.ts @@ -0,0 +1,19 @@ +// Original file: proto/grpc/testing/echo.proto + +import type * as grpc from '@grpc/grpc-js' +import type { MethodDefinition } from '@grpc/proto-loader' + +/** + * A service without any rpc defined to test coverage. + */ +export interface NoRpcServiceClient extends grpc.Client { +} + +/** + * A service without any rpc defined to test coverage. + */ +export interface NoRpcServiceHandlers extends grpc.UntypedServiceImplementation { +} + +export interface NoRpcServiceDefinition extends grpc.ServiceDefinition { +} diff --git a/packages/grpc-js-xds/test/generated/grpc/testing/RequestParams.ts b/packages/grpc-js-xds/test/generated/grpc/testing/RequestParams.ts new file mode 100644 index 000000000..e8c5ef1d1 --- /dev/null +++ b/packages/grpc-js-xds/test/generated/grpc/testing/RequestParams.ts @@ -0,0 +1,75 @@ +// Original file: proto/grpc/testing/echo_messages.proto + +import type { DebugInfo as _grpc_testing_DebugInfo, DebugInfo__Output as _grpc_testing_DebugInfo__Output } from '../../grpc/testing/DebugInfo'; +import type { ErrorStatus as _grpc_testing_ErrorStatus, ErrorStatus__Output as _grpc_testing_ErrorStatus__Output } from '../../grpc/testing/ErrorStatus'; +import type { OrcaLoadReport as _xds_data_orca_v3_OrcaLoadReport, OrcaLoadReport__Output as _xds_data_orca_v3_OrcaLoadReport__Output } from '../../xds/data/orca/v3/OrcaLoadReport'; + +export interface RequestParams { + 'echo_deadline'?: (boolean); + 'client_cancel_after_us'?: (number); + 'server_cancel_after_us'?: (number); + 'echo_metadata'?: (boolean); + 'check_auth_context'?: (boolean); + 'response_message_length'?: (number); + 'echo_peer'?: (boolean); + /** + * will force check_auth_context. + */ + 'expected_client_identity'?: (string); + 'skip_cancelled_check'?: (boolean); + 'expected_transport_security_type'?: (string); + 'debug_info'?: (_grpc_testing_DebugInfo | null); + /** + * Server should not see a request with this set. + */ + 'server_die'?: (boolean); + 'binary_error_details'?: (string); + 'expected_error'?: (_grpc_testing_ErrorStatus | null); + /** + * sleep when invoking server for deadline tests + */ + 'server_sleep_us'?: (number); + /** + * which backend to send request to + */ + 'backend_channel_idx'?: (number); + 'echo_metadata_initially'?: (boolean); + 'server_notify_client_when_started'?: (boolean); + 'backend_metrics'?: (_xds_data_orca_v3_OrcaLoadReport | null); + 'echo_host_from_authority_header'?: (boolean); +} + +export interface RequestParams__Output { + 'echo_deadline': (boolean); + 'client_cancel_after_us': (number); + 'server_cancel_after_us': (number); + 'echo_metadata': (boolean); + 'check_auth_context': (boolean); + 'response_message_length': (number); + 'echo_peer': (boolean); + /** + * will force check_auth_context. + */ + 'expected_client_identity': (string); + 'skip_cancelled_check': (boolean); + 'expected_transport_security_type': (string); + 'debug_info': (_grpc_testing_DebugInfo__Output | null); + /** + * Server should not see a request with this set. 
+ */ + 'server_die': (boolean); + 'binary_error_details': (string); + 'expected_error': (_grpc_testing_ErrorStatus__Output | null); + /** + * sleep when invoking server for deadline tests + */ + 'server_sleep_us': (number); + /** + * which backend to send request to + */ + 'backend_channel_idx': (number); + 'echo_metadata_initially': (boolean); + 'server_notify_client_when_started': (boolean); + 'backend_metrics': (_xds_data_orca_v3_OrcaLoadReport__Output | null); + 'echo_host_from_authority_header': (boolean); +} diff --git a/packages/grpc-js-xds/test/generated/grpc/testing/ResponseParams.ts b/packages/grpc-js-xds/test/generated/grpc/testing/ResponseParams.ts new file mode 100644 index 000000000..588e463c2 --- /dev/null +++ b/packages/grpc-js-xds/test/generated/grpc/testing/ResponseParams.ts @@ -0,0 +1,15 @@ +// Original file: proto/grpc/testing/echo_messages.proto + +import type { Long } from '@grpc/proto-loader'; + +export interface ResponseParams { + 'request_deadline'?: (number | string | Long); + 'host'?: (string); + 'peer'?: (string); +} + +export interface ResponseParams__Output { + 'request_deadline': (string); + 'host': (string); + 'peer': (string); +} diff --git a/packages/grpc-js-xds/test/generated/grpc/testing/SimpleRequest.ts b/packages/grpc-js-xds/test/generated/grpc/testing/SimpleRequest.ts new file mode 100644 index 000000000..292a2020c --- /dev/null +++ b/packages/grpc-js-xds/test/generated/grpc/testing/SimpleRequest.ts @@ -0,0 +1,8 @@ +// Original file: proto/grpc/testing/simple_messages.proto + + +export interface SimpleRequest { +} + +export interface SimpleRequest__Output { +} diff --git a/packages/grpc-js-xds/test/generated/grpc/testing/SimpleResponse.ts b/packages/grpc-js-xds/test/generated/grpc/testing/SimpleResponse.ts new file mode 100644 index 000000000..3e8735e5e --- /dev/null +++ b/packages/grpc-js-xds/test/generated/grpc/testing/SimpleResponse.ts @@ -0,0 +1,8 @@ +// Original file: proto/grpc/testing/simple_messages.proto + + +export interface SimpleResponse { +} + +export interface SimpleResponse__Output { +} diff --git a/packages/grpc-js-xds/test/generated/grpc/testing/StringValue.ts b/packages/grpc-js-xds/test/generated/grpc/testing/StringValue.ts new file mode 100644 index 000000000..4a779ae2b --- /dev/null +++ b/packages/grpc-js-xds/test/generated/grpc/testing/StringValue.ts @@ -0,0 +1,10 @@ +// Original file: proto/grpc/testing/simple_messages.proto + + +export interface StringValue { + 'message'?: (string); +} + +export interface StringValue__Output { + 'message': (string); +} diff --git a/packages/grpc-js-xds/test/generated/grpc/testing/UnimplementedEchoService.ts b/packages/grpc-js-xds/test/generated/grpc/testing/UnimplementedEchoService.ts new file mode 100644 index 000000000..48128976e --- /dev/null +++ b/packages/grpc-js-xds/test/generated/grpc/testing/UnimplementedEchoService.ts @@ -0,0 +1,27 @@ +// Original file: proto/grpc/testing/echo.proto + +import type * as grpc from '@grpc/grpc-js' +import type { MethodDefinition } from '@grpc/proto-loader' +import type { EchoRequest as _grpc_testing_EchoRequest, EchoRequest__Output as _grpc_testing_EchoRequest__Output } from '../../grpc/testing/EchoRequest'; +import type { EchoResponse as _grpc_testing_EchoResponse, EchoResponse__Output as _grpc_testing_EchoResponse__Output } from '../../grpc/testing/EchoResponse'; + +export interface UnimplementedEchoServiceClient extends grpc.Client { + Unimplemented(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: 
grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Unimplemented(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Unimplemented(argument: _grpc_testing_EchoRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + Unimplemented(argument: _grpc_testing_EchoRequest, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + unimplemented(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + unimplemented(argument: _grpc_testing_EchoRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + unimplemented(argument: _grpc_testing_EchoRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + unimplemented(argument: _grpc_testing_EchoRequest, callback: grpc.requestCallback<_grpc_testing_EchoResponse__Output>): grpc.ClientUnaryCall; + +} + +export interface UnimplementedEchoServiceHandlers extends grpc.UntypedServiceImplementation { + Unimplemented: grpc.handleUnaryCall<_grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse>; + +} + +export interface UnimplementedEchoServiceDefinition extends grpc.ServiceDefinition { + Unimplemented: MethodDefinition<_grpc_testing_EchoRequest, _grpc_testing_EchoResponse, _grpc_testing_EchoRequest__Output, _grpc_testing_EchoResponse__Output> +} diff --git a/packages/grpc-js-xds/test/generated/xds/data/orca/v3/OrcaLoadReport.ts b/packages/grpc-js-xds/test/generated/xds/data/orca/v3/OrcaLoadReport.ts new file mode 100644 index 000000000..d66c42713 --- /dev/null +++ b/packages/grpc-js-xds/test/generated/xds/data/orca/v3/OrcaLoadReport.ts @@ -0,0 +1,59 @@ +// Original file: proto/grpc/testing/xds/v3/orca_load_report.proto + +import type { Long } from '@grpc/proto-loader'; + +export interface OrcaLoadReport { + /** + * CPU utilization expressed as a fraction of available CPU resources. This + * should be derived from the latest sample or measurement. + */ + 'cpu_utilization'?: (number | string); + /** + * Memory utilization expressed as a fraction of available memory + * resources. This should be derived from the latest sample or measurement. + */ + 'mem_utilization'?: (number | string); + /** + * Total RPS being served by an endpoint. This should cover all services that an endpoint is + * responsible for. + */ + 'rps'?: (number | string | Long); + /** + * Application specific requests costs. Each value is an absolute cost (e.g. 3487 bytes of + * storage) associated with the request. + */ + 'request_cost'?: ({[key: string]: number | string}); + /** + * Resource utilization values. Each value is expressed as a fraction of total resources + * available, derived from the latest sample or measurement. + */ + 'utilization'?: ({[key: string]: number | string}); +} + +export interface OrcaLoadReport__Output { + /** + * CPU utilization expressed as a fraction of available CPU resources. This + * should be derived from the latest sample or measurement. + */ + 'cpu_utilization': (number | string); + /** + * Memory utilization expressed as a fraction of available memory + * resources. This should be derived from the latest sample or measurement. 
+ */ + 'mem_utilization': (number | string); + /** + * Total RPS being served by an endpoint. This should cover all services that an endpoint is + * responsible for. + */ + 'rps': (string); + /** + * Application specific requests costs. Each value is an absolute cost (e.g. 3487 bytes of + * storage) associated with the request. + */ + 'request_cost': ({[key: string]: number | string}); + /** + * Resource utilization values. Each value is expressed as a fraction of total resources + * available, derived from the latest sample or measurement. + */ + 'utilization': ({[key: string]: number | string}); +} diff --git a/packages/grpc-js-xds/test/test-bootstrap.ts b/packages/grpc-js-xds/test/test-bootstrap.ts new file mode 100644 index 000000000..f7874d2a1 --- /dev/null +++ b/packages/grpc-js-xds/test/test-bootstrap.ts @@ -0,0 +1,33 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import * as assert from 'assert'; +import { validateXdsServerConfig } from "../src/xds-bootstrap"; + +describe('bootstrap', () => { + /* validateXdsServerConfig is used when creating the cds config, and then + * the resulting value is validated again when creating the + * xds_cluster_resolver config. */ + it('validateXdsServerConfig should be idempotent', () => { + const config = { + server_uri: 'localhost', + channel_creds: [{type: 'google_default'}], + server_features: ['test_feature'] + }; + assert.deepStrictEqual(validateXdsServerConfig(validateXdsServerConfig(config)), validateXdsServerConfig(config)); + }); +}); diff --git a/packages/grpc-js-xds/test/test-cluster-type.ts b/packages/grpc-js-xds/test/test-cluster-type.ts new file mode 100644 index 000000000..416f17727 --- /dev/null +++ b/packages/grpc-js-xds/test/test-cluster-type.ts @@ -0,0 +1,180 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +import { register } from "../src"; +import assert = require("assert"); +import { XdsServer } from "./xds-server"; +import { XdsTestClient } from "./client"; +import { FakeAggregateCluster, FakeDnsCluster, FakeEdsCluster, FakeRouteGroup } from "./framework"; +import { Backend } from "./backend"; + +register(); + +describe('Cluster types', () => { + let xdsServer: XdsServer; + let client: XdsTestClient; + beforeEach(done => { + xdsServer = new XdsServer(); + xdsServer.startServer(error => { + done(error); + }); + }); + afterEach(() => { + client?.close(); + xdsServer?.shutdownServer(); + }); + describe('Logical DNS Clusters', () => { + it('Should successfully make RPCs', done => { + const cluster = new FakeDnsCluster('dnsCluster', new Backend()); + const routeGroup = new FakeRouteGroup('listener1', 'route1', [{cluster: cluster}]); + routeGroup.startAllBackends().then(() => { + xdsServer.addResponseListener((typeUrl, responseState) => { + if (responseState.state === 'NACKED') { + assert.fail(`Client NACKED ${typeUrl} resource with message ${responseState.errorMessage}`); + } + }); + xdsServer.setCdsResource(cluster.getClusterConfig()); + xdsServer.setRdsResource(routeGroup.getRouteConfiguration()); + xdsServer.setLdsResource(routeGroup.getListener()); + client = XdsTestClient.createFromServer('listener1', xdsServer); + client.sendOneCall(error => { + done(error); + }); + }, reason => done(reason)); + }); + }); + /* These tests pass on Node 18 but fail on Node 16, probably because of + * https://github.com/nodejs/node/issues/42713 */ + describe.skip('Aggregate DNS Clusters', () => { + it('Should result in prioritized clusters', () => { + const backend1 = new Backend(); + const backend2 = new Backend(); + const cluster1 = new FakeEdsCluster('cluster1', 'endpoint1', [{backends: [backend1], locality:{region: 'region1'}}]); + const cluster2 = new FakeEdsCluster('cluster2', 'endpoint2', [{backends: [backend2], locality:{region: 'region2'}}]); + const aggregateCluster = new FakeAggregateCluster('aggregateCluster', [cluster1, cluster2]); + const routeGroup = new FakeRouteGroup('listener1', 'route1', [{cluster: aggregateCluster}]); + return routeGroup.startAllBackends().then(() => { + xdsServer.setEdsResource(cluster1.getEndpointConfig()); + xdsServer.setCdsResource(cluster1.getClusterConfig()); + xdsServer.setEdsResource(cluster2.getEndpointConfig()); + xdsServer.setCdsResource(cluster2.getClusterConfig()); + xdsServer.setCdsResource(aggregateCluster.getClusterConfig()); + xdsServer.setRdsResource(routeGroup.getRouteConfiguration()); + xdsServer.setLdsResource(routeGroup.getListener()); + xdsServer.addResponseListener((typeUrl, responseState) => { + if (responseState.state === 'NACKED') { + client.stopCalls(); + assert.fail(`Client NACKED ${typeUrl} resource with message ${responseState.errorMessage}`); + } + }); + client = XdsTestClient.createFromServer('listener1', xdsServer); + client.startCalls(100); + return cluster1.waitForAllBackendsToReceiveTraffic(); + }).then(() => backend1.shutdownAsync() + ).then(() => cluster2.waitForAllBackendsToReceiveTraffic() + ).then(() => backend1.startAsync() + ).then(() => cluster1.waitForAllBackendsToReceiveTraffic()); + }); + it('Should handle a diamond dependency', () => { + const backend1 = new Backend(); + const backend2 = new Backend(); + const cluster1 = new FakeEdsCluster('cluster1', 'endpoint1', [{backends: [backend1], locality:{region: 'region1'}}]); + const cluster2 = new FakeEdsCluster('cluster2', 'endpoint2', [{backends: [backend2], 
locality:{region: 'region2'}}]); + const aggregateCluster1 = new FakeAggregateCluster('aggregateCluster1', [cluster1, cluster2]); + const aggregateCluster2 = new FakeAggregateCluster('aggregateCluster2', [cluster1, aggregateCluster1]); + const routeGroup = new FakeRouteGroup('listener1', 'route1', [{cluster: aggregateCluster2}]); + return Promise.all([backend1.startAsync(), backend2.startAsync()]).then(() => { + xdsServer.setEdsResource(cluster1.getEndpointConfig()); + xdsServer.setCdsResource(cluster1.getClusterConfig()); + xdsServer.setEdsResource(cluster2.getEndpointConfig()); + xdsServer.setCdsResource(cluster2.getClusterConfig()); + xdsServer.setCdsResource(aggregateCluster1.getClusterConfig()); + xdsServer.setCdsResource(aggregateCluster2.getClusterConfig()); + xdsServer.setRdsResource(routeGroup.getRouteConfiguration()); + xdsServer.setLdsResource(routeGroup.getListener()); + xdsServer.addResponseListener((typeUrl, responseState) => { + if (responseState.state === 'NACKED') { + client.stopCalls(); + assert.fail(`Client NACKED ${typeUrl} resource with message ${responseState.errorMessage}`); + } + }); + client = XdsTestClient.createFromServer('listener1', xdsServer); + client.startCalls(100); + return cluster1.waitForAllBackendsToReceiveTraffic(); + }).then(() => backend1.shutdownAsync() + ).then(() => cluster2.waitForAllBackendsToReceiveTraffic() + ).then(() => backend1.startAsync() + ).then(() => cluster1.waitForAllBackendsToReceiveTraffic()); + }); + it('Should handle EDS then DNS cluster order', () => { + const backend1 = new Backend(); + const backend2 = new Backend(); + const cluster1 = new FakeEdsCluster('cluster1', 'endpoint1', [{backends: [backend1], locality:{region: 'region1'}}]); + const cluster2 = new FakeDnsCluster('cluster2', backend2); + const aggregateCluster = new FakeAggregateCluster('aggregateCluster', [cluster1, cluster2]); + const routeGroup = new FakeRouteGroup('listener1', 'route1', [{cluster: aggregateCluster}]); + return routeGroup.startAllBackends().then(() => { + xdsServer.setEdsResource(cluster1.getEndpointConfig()); + xdsServer.setCdsResource(cluster1.getClusterConfig()); + xdsServer.setCdsResource(cluster2.getClusterConfig()); + xdsServer.setCdsResource(aggregateCluster.getClusterConfig()); + xdsServer.setRdsResource(routeGroup.getRouteConfiguration()); + xdsServer.setLdsResource(routeGroup.getListener()); + xdsServer.addResponseListener((typeUrl, responseState) => { + if (responseState.state === 'NACKED') { + client.stopCalls(); + assert.fail(`Client NACKED ${typeUrl} resource with message ${responseState.errorMessage}`); + } + }); + client = XdsTestClient.createFromServer('listener1', xdsServer); + client.startCalls(100); + return cluster1.waitForAllBackendsToReceiveTraffic(); + }).then(() => backend1.shutdownAsync() + ).then(() => cluster2.waitForAllBackendsToReceiveTraffic() + ).then(() => backend1.startAsync() + ).then(() => cluster1.waitForAllBackendsToReceiveTraffic()); + }); + it('Should handle DNS then EDS cluster order', () => { + const backend1 = new Backend(); + const backend2 = new Backend(); + const cluster1 = new FakeDnsCluster('cluster1', backend1); + const cluster2 = new FakeEdsCluster('cluster2', 'endpoint2', [{backends: [backend2], locality:{region: 'region2'}}]); + const aggregateCluster = new FakeAggregateCluster('aggregateCluster', [cluster1, cluster2]); + const routeGroup = new FakeRouteGroup('listener1', 'route1', [{cluster: aggregateCluster}]); + return routeGroup.startAllBackends().then(() => { + 
xdsServer.setCdsResource(cluster1.getClusterConfig()); + xdsServer.setEdsResource(cluster2.getEndpointConfig()); + xdsServer.setCdsResource(cluster2.getClusterConfig()); + xdsServer.setCdsResource(aggregateCluster.getClusterConfig()); + xdsServer.setRdsResource(routeGroup.getRouteConfiguration()); + xdsServer.setLdsResource(routeGroup.getListener()); + xdsServer.addResponseListener((typeUrl, responseState) => { + if (responseState.state === 'NACKED') { + client.stopCalls(); + assert.fail(`Client NACKED ${typeUrl} resource with message ${responseState.errorMessage}`); + } + }); + client = XdsTestClient.createFromServer('listener1', xdsServer); + client.startCalls(100); + return cluster1.waitForAllBackendsToReceiveTraffic(); + }).then(() => backend1.shutdownAsync() + ).then(() => cluster2.waitForAllBackendsToReceiveTraffic() + ).then(() => backend1.startAsync() + ).then(() => cluster1.waitForAllBackendsToReceiveTraffic()); + }); + }); +}); diff --git a/packages/grpc-js-xds/test/test-confg-parsing.ts b/packages/grpc-js-xds/test/test-confg-parsing.ts new file mode 100644 index 000000000..c185c8527 --- /dev/null +++ b/packages/grpc-js-xds/test/test-confg-parsing.ts @@ -0,0 +1,378 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import { experimental, LoadBalancingConfig } from "@grpc/grpc-js"; +import { register } from "../src"; +import assert = require("assert"); +import parseLoadbalancingConfig = experimental.parseLoadBalancingConfig; +import { EXPERIMENTAL_RING_HASH } from "../src/environment"; + +register(); + +/** + * Describes a test case for config parsing. input is passed to + * parseLoadBalancingConfig. If error is set, the expectation is that the + * operation throws an error with a matching message. Otherwise, toJsonObject + * is called on the result, and it is expected to match output, or input if + * output is unset. + */ +interface TestCase { + name: string; + input: object, + output?: object; + error?: RegExp; + skipIf?: boolean; +} + +/* The main purpose of these tests is to verify that configs that are expected + * to be valid parse successfully, and configs that are expected to be invalid + * throw errors. The specific output of this parsing is a lower priority + * concern. + * Note: some tests have an expected output that is different from the input, + * but all non-error tests additionally verify that parsing the output again * produces the same output. 
*/ +const allTestCases: {[lbPolicyName: string]: TestCase[]} = { + cds: [ + { + name: 'populated cluster field', + input: { + cluster: 'abc' + } + }, + { + name: 'empty', + input: {}, + error: /cluster/ + }, + { + name: 'non-string cluster', + input: { + cluster: 123 + }, + error: /string.*cluster/ + } + ], + xds_cluster_resolver: [ + { + name: 'empty fields', + input: { + discovery_mechanisms: [], + xds_lb_policy: [] + } + }, + { + name: 'missing discovery_mechanisms', + input: { + xds_lb_policy: [] + }, + error: /discovery_mechanisms/ + }, + { + name: 'missing xds_lb_policy', + input: { + discovery_mechanisms: [] + }, + error: /xds_lb_policy/ + }, + { + name: 'discovery_mechanism: EDS', + input: { + discovery_mechanisms: [{ + cluster: 'abc', + type: 'EDS' + }], + xds_lb_policy: [] + }, + output: { + discovery_mechanisms: [{ + cluster: 'abc', + type: 'EDS', + lrs_load_reporting_server: undefined + }], + xds_lb_policy: [] + } + }, + { + name: 'discovery_mechanism: LOGICAL_DNS', + input: { + discovery_mechanisms: [{ + cluster: 'abc', + type: 'LOGICAL_DNS' + }], + xds_lb_policy: [] + }, + output: { + discovery_mechanisms: [{ + cluster: 'abc', + type: 'LOGICAL_DNS', + lrs_load_reporting_server: undefined + }], + xds_lb_policy: [] + } + }, + { + name: 'discovery_mechanism: undefined optional fields', + input: { + discovery_mechanisms: [{ + cluster: 'abc', + type: 'EDS', + max_concurrent_requests: undefined, + eds_service_name: undefined, + dns_hostname: undefined, + lrs_load_reporting_server: undefined + }], + xds_lb_policy: [] + } + }, + { + name: 'discovery_mechanism: populated optional fields', + input: { + discovery_mechanisms: [{ + cluster: 'abc', + type: 'EDS', + max_concurrent_requests: 100, + eds_service_name: 'def', + dns_hostname: 'localhost', + lrs_load_reporting_server: { + server_uri: 'localhost:12345', + channel_creds: [{ + type: 'google_default', + config: {} + }], + server_features: ['test'] + } + }], + xds_lb_policy: [] + } + } + ], + xds_cluster_impl: [ + { + name: 'only required fields', + input: { + cluster: 'abc', + eds_service_name: 'def', + drop_categories: [], + lrs_load_reporting_server: { + server_uri: 'localhost:12345', + channel_creds: [{ + type: 'google_default', + config: {} + }], + server_features: ['test'] + }, + child_policy: [{round_robin: {}}] + }, + output: { + cluster: 'abc', + eds_service_name: 'def', + drop_categories: [], + lrs_load_reporting_server: { + server_uri: 'localhost:12345', + channel_creds: [{ + type: 'google_default', + config: {} + }], + server_features: ['test'] + }, + child_policy: [{round_robin: {}}], + max_concurrent_requests: 1024 + } + }, + { + name: 'undefined optional fields', + input: { + cluster: 'abc', + eds_service_name: 'def', + drop_categories: [], + lrs_load_reporting_server: { + server_uri: 'localhost:12345', + channel_creds: [{ + type: 'google_default', + config: {} + }], + server_features: ['test'] + }, + child_policy: [{round_robin: {}}], + max_concurrent_requests: undefined + }, + output: { + cluster: 'abc', + eds_service_name: 'def', + drop_categories: [], + lrs_load_reporting_server: { + server_uri: 'localhost:12345', + channel_creds: [{ + type: 'google_default', + config: {} + }], + server_features: ['test'] + }, + child_policy: [{round_robin: {}}], + max_concurrent_requests: 1024 + } + }, + { + name: 'populated optional fields', + input: { + cluster: 'abc', + eds_service_name: 'def', + drop_categories: [{ + category: 'test', + requests_per_million: 100 + }], + lrs_load_reporting_server: { + server_uri: 
'localhost:12345', + channel_creds: [{ + type: 'google_default', + config: {} + }], + server_features: ['test'] + }, + child_policy: [{round_robin: {}}], + max_concurrent_requests: 123 + }, + } + ], + priority: [ + { + name: 'empty fields', + input: { + children: {}, + priorities: [] + } + }, + { + name: 'populated fields', + input: { + children: { + child0: { + config: [{round_robin: {}}], + ignore_reresolution_requests: true + }, + child1: { + config: [{round_robin: {}}], + ignore_reresolution_requests: false + } + }, + priorities: ['child0', 'child1'] + } + } + ], + weighted_target: [ + { + name: 'empty targets field', + input: { + targets: {} + } + }, + { + name: 'populated targets field', + input: { + targets: { + target0: { + weight: 1, + child_policy: [{round_robin: {}}] + }, + target1: { + weight: 2, + child_policy: [{round_robin: {}}] + } + } + } + } + ], + xds_cluster_manager: [ + { + name: 'empty children field', + input: { + children: {} + } + }, + { + name: 'populated children field', + input: { + children: { + child0: { + child_policy: [{round_robin: {}}] + } + } + } + } + ], + ring_hash: [ + { + name: 'empty config', + input: {}, + output: { + min_ring_size: 1024, + max_ring_size: 4096 + }, + skipIf: !EXPERIMENTAL_RING_HASH + }, + { + name: 'populated config', + input: { + min_ring_size: 2048, + max_ring_size: 8192 + }, + skipIf: !EXPERIMENTAL_RING_HASH + }, + { + name: 'min_ring_size too large', + input: { + min_ring_size: 8_388_609 + }, + error: /min_ring_size/, + skipIf: !EXPERIMENTAL_RING_HASH + }, + { + name: 'max_ring_size too large', + input: { + max_ring_size: 8_388_609 + }, + error: /max_ring_size/, + skipIf: !EXPERIMENTAL_RING_HASH + } + ] +} + +describe('Load balancing policy config parsing', () => { + for (const [lbPolicyName, testCases] of Object.entries(allTestCases)) { + describe(lbPolicyName, () => { + for (const testCase of testCases) { + it(testCase.name, function() { + if (testCase.skipIf) { + this.skip(); + } + const lbConfigInput = {[lbPolicyName]: testCase.input}; + if (testCase.error) { + assert.throws(() => { + parseLoadbalancingConfig(lbConfigInput); + }, testCase.error); + } else { + const expectedOutput = testCase.output ?? testCase.input; + const parsedJson = parseLoadbalancingConfig(lbConfigInput).toJsonObject(); + assert.deepStrictEqual(parsedJson, {[lbPolicyName]: expectedOutput}); + // Test idempotency + assert.deepStrictEqual(parseLoadbalancingConfig(parsedJson).toJsonObject(), parsedJson); + } + }); + } + }); + } +}); diff --git a/packages/grpc-js-xds/test/test-core.ts b/packages/grpc-js-xds/test/test-core.ts new file mode 100644 index 000000000..5d71ff8b8 --- /dev/null +++ b/packages/grpc-js-xds/test/test-core.ts @@ -0,0 +1,122 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +import { Backend } from "./backend"; +import { XdsTestClient } from "./client"; +import { FakeEdsCluster, FakeRouteGroup } from "./framework"; +import { XdsServer } from "./xds-server"; + +import { register } from "../src"; +import assert = require("assert"); +import { connectivityState } from "@grpc/grpc-js"; + +register(); + +describe('core xDS functionality', () => { + let xdsServer: XdsServer; + let client: XdsTestClient; + beforeEach(done => { + xdsServer = new XdsServer(); + xdsServer.startServer(error => { + done(error); + }); + }); + afterEach(() => { + client?.close(); + xdsServer?.shutdownServer(); + }) + it('should route requests to the single backend', done => { + const cluster = new FakeEdsCluster('cluster1', 'endpoint1', [{backends: [new Backend()], locality:{region: 'region1'}}]); + const routeGroup = new FakeRouteGroup('listener1', 'route1', [{cluster: cluster}]); + routeGroup.startAllBackends().then(() => { + xdsServer.setEdsResource(cluster.getEndpointConfig()); + xdsServer.setCdsResource(cluster.getClusterConfig()); + xdsServer.setRdsResource(routeGroup.getRouteConfiguration()); + xdsServer.setLdsResource(routeGroup.getListener()); + xdsServer.addResponseListener((typeUrl, responseState) => { + if (responseState.state === 'NACKED') { + client.stopCalls(); + assert.fail(`Client NACKED ${typeUrl} resource with message ${responseState.errorMessage}`); + } + }) + client = XdsTestClient.createFromServer('listener1', xdsServer); + client.startCalls(100); + routeGroup.waitForAllBackendsToReceiveTraffic().then(() => { + client.stopCalls(); + done(); + }, reason => done(reason)); + }, reason => done(reason)); + }); + it('should be able to enter and exit idle', function(done) { + this.timeout(5000); + const cluster = new FakeEdsCluster('cluster1', 'endpoint1', [{backends: [new Backend()], locality:{region: 'region1'}}]); + const routeGroup = new FakeRouteGroup('listener1', 'route1', [{cluster: cluster}]); + routeGroup.startAllBackends().then(() => { + xdsServer.setEdsResource(cluster.getEndpointConfig()); + xdsServer.setCdsResource(cluster.getClusterConfig()); + xdsServer.setRdsResource(routeGroup.getRouteConfiguration()); + xdsServer.setLdsResource(routeGroup.getListener()); + xdsServer.addResponseListener((typeUrl, responseState) => { + if (responseState.state === 'NACKED') { + client.stopCalls(); + assert.fail(`Client NACKED ${typeUrl} resource with message ${responseState.errorMessage}`); + } + }) + client = XdsTestClient.createFromServer('listener1', xdsServer, { + 'grpc.client_idle_timeout_ms': 1000, + }); + client.sendOneCall(error => { + assert.ifError(error); + assert.strictEqual(client.getConnectivityState(), connectivityState.READY); + setTimeout(() => { + assert.strictEqual(client.getConnectivityState(), connectivityState.IDLE); + client.sendOneCall(error => { + done(error); + }) + }, 1100); + }); + }, reason => done(reason)); + }); + it('should handle connections aging out', function(done) { + this.timeout(5000); + const cluster = new FakeEdsCluster('cluster1', 'endpoint1', [{backends: [new Backend({'grpc.max_connection_age_ms': 1000})], locality:{region: 'region1'}}]); + const routeGroup = new FakeRouteGroup('listener1', 'route1', [{cluster: cluster}]); + routeGroup.startAllBackends().then(() => { + xdsServer.setEdsResource(cluster.getEndpointConfig()); + xdsServer.setCdsResource(cluster.getClusterConfig()); + xdsServer.setRdsResource(routeGroup.getRouteConfiguration()); + xdsServer.setLdsResource(routeGroup.getListener()); + 
xdsServer.addResponseListener((typeUrl, responseState) => { + if (responseState.state === 'NACKED') { + client.stopCalls(); + assert.fail(`Client NACKED ${typeUrl} resource with message ${responseState.errorMessage}`); + } + }) + client = XdsTestClient.createFromServer('listener1', xdsServer); + client.sendOneCall(error => { + assert.ifError(error); + // Make another call after the max_connection_age_ms expires + setTimeout(() => { + client.sendOneCall(error => { + done(error); + }) + }, 1100); + }); + }, reason => done(reason)); + + }) +}); diff --git a/packages/grpc-js-xds/test/test-custom-lb-policies.ts b/packages/grpc-js-xds/test/test-custom-lb-policies.ts new file mode 100644 index 000000000..443601e36 --- /dev/null +++ b/packages/grpc-js-xds/test/test-custom-lb-policies.ts @@ -0,0 +1,325 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import { AnyExtension } from "@grpc/proto-loader"; +import { Any } from "../src/generated/google/protobuf/Any"; +import { Backend } from "./backend"; +import { XdsTestClient } from "./client"; +import { FakeEdsCluster, FakeRouteGroup } from "./framework"; +import { XdsServer } from "./xds-server"; +import * as assert from 'assert'; +import { WrrLocality } from "../src/generated/envoy/extensions/load_balancing_policies/wrr_locality/v3/WrrLocality"; +import { TypedStruct } from "../src/generated/xds/type/v3/TypedStruct"; +import { ChannelOptions, connectivityState, experimental, logVerbosity } from "@grpc/grpc-js"; + +import TypedLoadBalancingConfig = experimental.TypedLoadBalancingConfig; +import LoadBalancer = experimental.LoadBalancer; +import ChannelControlHelper = experimental.ChannelControlHelper; +import ChildLoadBalancerHandler = experimental.ChildLoadBalancerHandler; +import Endpoint = experimental.Endpoint; +import Picker = experimental.Picker; +import PickArgs = experimental.PickArgs; +import PickResult = experimental.PickResult; +import PickResultType = experimental.PickResultType; +import createChildChannelControlHelper = experimental.createChildChannelControlHelper; +import parseLoadBalancingConfig = experimental.parseLoadBalancingConfig; +import registerLoadBalancerType = experimental.registerLoadBalancerType; +import { PickFirst } from "../src/generated/envoy/extensions/load_balancing_policies/pick_first/v3/PickFirst"; + +const LB_POLICY_NAME = 'test.RpcBehaviorLoadBalancer'; + +class RpcBehaviorLoadBalancingConfig implements TypedLoadBalancingConfig { + constructor(private rpcBehavior: string) {} + getLoadBalancerName(): string { + return LB_POLICY_NAME; + } + toJsonObject(): object { + return { + [LB_POLICY_NAME]: { + 'rpcBehavior': this.rpcBehavior + } + }; + } + getRpcBehavior() { + return this.rpcBehavior; + } + static createFromJson(obj: any): RpcBehaviorLoadBalancingConfig { + if (!('rpcBehavior' in obj && typeof obj.rpcBehavior === 'string')) { + throw new Error(`${LB_POLICY_NAME} parsing error: expected string field rpcBehavior`); + } + return new 
RpcBehaviorLoadBalancingConfig(obj.rpcBehavior); + } +} + +class RpcBehaviorPicker implements Picker { + constructor(private wrappedPicker: Picker, private rpcBehavior: string) {} + pick(pickArgs: PickArgs): PickResult { + const wrappedPick = this.wrappedPicker.pick(pickArgs); + if (wrappedPick.pickResultType === PickResultType.COMPLETE) { + pickArgs.metadata.add('rpc-behavior', this.rpcBehavior); + } + return wrappedPick; + } +} + +const RPC_BEHAVIOR_CHILD_CONFIG = parseLoadBalancingConfig({round_robin: {}}); + +/** + * Load balancer implementation for Custom LB policy test + */ +class RpcBehaviorLoadBalancer implements LoadBalancer { + private child: ChildLoadBalancerHandler; + private latestConfig: RpcBehaviorLoadBalancingConfig | null = null; + constructor(channelControlHelper: ChannelControlHelper, options: ChannelOptions) { + const childChannelControlHelper = createChildChannelControlHelper(channelControlHelper, { + updateState: (state, picker) => { + if (state === connectivityState.READY && this.latestConfig) { + picker = new RpcBehaviorPicker(picker, this.latestConfig.getRpcBehavior()); + } + channelControlHelper.updateState(state, picker); + } + }); + this.child = new ChildLoadBalancerHandler(childChannelControlHelper, options); + } + updateAddressList(endpointList: Endpoint[], lbConfig: TypedLoadBalancingConfig, attributes: { [key: string]: unknown; }): void { + if (!(lbConfig instanceof RpcBehaviorLoadBalancingConfig)) { + return; + } + this.latestConfig = lbConfig; + this.child.updateAddressList(endpointList, RPC_BEHAVIOR_CHILD_CONFIG, attributes); + } + exitIdle(): void { + this.child.exitIdle(); + } + resetBackoff(): void { + this.child.resetBackoff(); + } + destroy(): void { + this.child.destroy(); + } + getTypeName(): string { + return LB_POLICY_NAME; + } +} + +registerLoadBalancerType(LB_POLICY_NAME, RpcBehaviorLoadBalancer, RpcBehaviorLoadBalancingConfig); + +describe('Custom LB policies', () => { + let xdsServer: XdsServer; + let client: XdsTestClient; + beforeEach(done => { + xdsServer = new XdsServer(); + xdsServer.startServer(error => { + done(error); + }); + }); + afterEach(() => { + client?.close(); + xdsServer?.shutdownServer(); + }); + it('Should handle round_robin', done => { + const lbPolicy: Any = { + '@type': 'type.googleapis.com/envoy.extensions.load_balancing_policies.round_robin.v3.RoundRobin' + }; + const cluster = new FakeEdsCluster('cluster1', 'endpoint1', [{backends: [new Backend()], locality:{region: 'region1'}}], lbPolicy); + const routeGroup = new FakeRouteGroup('listener1', 'route1', [{cluster: cluster}]); + routeGroup.startAllBackends().then(() => { + xdsServer.setEdsResource(cluster.getEndpointConfig()); + xdsServer.setCdsResource(cluster.getClusterConfig()); + xdsServer.setRdsResource(routeGroup.getRouteConfiguration()); + xdsServer.setLdsResource(routeGroup.getListener()); + xdsServer.addResponseListener((typeUrl, responseState) => { + if (responseState.state === 'NACKED') { + client.stopCalls(); + assert.fail(`Client NACKED ${typeUrl} resource with message ${responseState.errorMessage}`); + } + }) + client = XdsTestClient.createFromServer('listener1', xdsServer); + client.sendOneCall(done); + }, reason => done(reason)); + }); + it('Should handle xds_wrr_locality with round_robin child', done => { + const lbPolicy: WrrLocality & AnyExtension = { + '@type': 'type.googleapis.com/envoy.extensions.load_balancing_policies.wrr_locality.v3.WrrLocality', + endpoint_picking_policy: { + policies: [ + { + typed_extension_config: { + name: 'child', + 
typed_config: { + '@type': 'type.googleapis.com/envoy.extensions.load_balancing_policies.round_robin.v3.RoundRobin' + } + } + } + ] + } + }; + const cluster = new FakeEdsCluster('cluster1', 'endpoint1', [{backends: [new Backend()], locality:{region: 'region1'}}], lbPolicy); + const routeGroup = new FakeRouteGroup('listener1', 'route1', [{cluster: cluster}]); + routeGroup.startAllBackends().then(() => { + xdsServer.setEdsResource(cluster.getEndpointConfig()); + xdsServer.setCdsResource(cluster.getClusterConfig()); + xdsServer.setRdsResource(routeGroup.getRouteConfiguration()); + xdsServer.setLdsResource(routeGroup.getListener()); + xdsServer.addResponseListener((typeUrl, responseState) => { + if (responseState.state === 'NACKED') { + client.stopCalls(); + assert.fail(`Client NACKED ${typeUrl} resource with message ${responseState.errorMessage}`); + } + }) + client = XdsTestClient.createFromServer('listener1', xdsServer); + client.sendOneCall(done); + }, reason => done(reason)); + }); + it('Should handle a typed_struct policy', done => { + const lbPolicy: TypedStruct & AnyExtension = { + '@type': 'type.googleapis.com/xds.type.v3.TypedStruct', + type_url: 'round_robin', + value: { + fields: {} + } + }; + const cluster = new FakeEdsCluster('cluster1', 'endpoint1', [{backends: [new Backend()], locality:{region: 'region1'}}], lbPolicy); + const routeGroup = new FakeRouteGroup('listener1', 'route1', [{cluster: cluster}]); + routeGroup.startAllBackends().then(() => { + xdsServer.setEdsResource(cluster.getEndpointConfig()); + xdsServer.setCdsResource(cluster.getClusterConfig()); + xdsServer.setRdsResource(routeGroup.getRouteConfiguration()); + xdsServer.setLdsResource(routeGroup.getListener()); + xdsServer.addResponseListener((typeUrl, responseState) => { + if (responseState.state === 'NACKED') { + client.stopCalls(); + assert.fail(`Client NACKED ${typeUrl} resource with message ${responseState.errorMessage}`); + } + }) + client = XdsTestClient.createFromServer('listener1', xdsServer); + client.sendOneCall(done); + }, reason => done(reason)); + }); + it('Should handle xds_wrr_locality with an unrecognized first child', done => { + const invalidChildPolicy: TypedStruct & AnyExtension = { + '@type': 'type.googleapis.com/xds.type.v3.TypedStruct', + type_url: 'test.ThisLoadBalancerDoesNotExist', + value: { + fields: {} + } + } + const lbPolicy: WrrLocality & AnyExtension = { + '@type': 'type.googleapis.com/envoy.extensions.load_balancing_policies.wrr_locality.v3.WrrLocality', + endpoint_picking_policy: { + policies: [ + { + typed_extension_config: { + name: 'child', + typed_config: invalidChildPolicy + } + }, + { + typed_extension_config: { + name: 'child', + typed_config: { + '@type': 'type.googleapis.com/envoy.extensions.load_balancing_policies.round_robin.v3.RoundRobin' + } + } + } + ] + } + }; + const cluster = new FakeEdsCluster('cluster1', 'endpoint1', [{backends: [new Backend()], locality:{region: 'region1'}}], lbPolicy); + const routeGroup = new FakeRouteGroup('listener1', 'route1', [{cluster: cluster}]); + routeGroup.startAllBackends().then(() => { + xdsServer.setEdsResource(cluster.getEndpointConfig()); + xdsServer.setCdsResource(cluster.getClusterConfig()); + xdsServer.setRdsResource(routeGroup.getRouteConfiguration()); + xdsServer.setLdsResource(routeGroup.getListener()); + xdsServer.addResponseListener((typeUrl, responseState) => { + if (responseState.state === 'NACKED') { + client.stopCalls(); + assert.fail(`Client NACKED ${typeUrl} resource with message ${responseState.errorMessage}`); + 
} + }) + client = XdsTestClient.createFromServer('listener1', xdsServer); + client.sendOneCall(done); + }, reason => done(reason)); + }); + it('Should handle a custom LB policy', done => { + const childPolicy: TypedStruct & AnyExtension = { + '@type': 'type.googleapis.com/xds.type.v3.TypedStruct', + type_url: 'test.RpcBehaviorLoadBalancer', + value: { + fields: { + rpcBehavior: {stringValue: 'error-code-15'} + } + } + }; + const lbPolicy: WrrLocality & AnyExtension = { + '@type': 'type.googleapis.com/envoy.extensions.load_balancing_policies.wrr_locality.v3.WrrLocality', + endpoint_picking_policy: { + policies: [ + { + typed_extension_config: { + name: 'child', + typed_config: childPolicy + } + } + ] + } + }; + const cluster = new FakeEdsCluster('cluster1', 'endpoint1', [{backends: [new Backend()], locality:{region: 'region1'}}], lbPolicy); + const routeGroup = new FakeRouteGroup('listener1', 'route1', [{cluster: cluster}]); + routeGroup.startAllBackends().then(() => { + xdsServer.setEdsResource(cluster.getEndpointConfig()); + xdsServer.setCdsResource(cluster.getClusterConfig()); + xdsServer.setRdsResource(routeGroup.getRouteConfiguration()); + xdsServer.setLdsResource(routeGroup.getListener()); + xdsServer.addResponseListener((typeUrl, responseState) => { + if (responseState.state === 'NACKED') { + client.stopCalls(); + assert.fail(`Client NACKED ${typeUrl} resource with message ${responseState.errorMessage}`); + } + }) + client = XdsTestClient.createFromServer('listener1', xdsServer); + client.sendOneCall(error => { + assert.strictEqual(error?.code, 15); + done(); + }); + }, reason => done(reason)); + }); + it('Should handle pick_first', done => { + const lbPolicy: PickFirst & AnyExtension = { + '@type': 'type.googleapis.com/envoy.extensions.load_balancing_policies.pick_first.v3.PickFirst', + shuffle_address_list: true + }; + const cluster = new FakeEdsCluster('cluster1', 'endpoint1', [{backends: [new Backend()], locality:{region: 'region1'}}], lbPolicy); + const routeGroup = new FakeRouteGroup('listener1', 'route1', [{cluster: cluster}]); + routeGroup.startAllBackends().then(() => { + xdsServer.setEdsResource(cluster.getEndpointConfig()); + xdsServer.setCdsResource(cluster.getClusterConfig()); + xdsServer.setRdsResource(routeGroup.getRouteConfiguration()); + xdsServer.setLdsResource(routeGroup.getListener()); + xdsServer.addResponseListener((typeUrl, responseState) => { + if (responseState.state === 'NACKED') { + client.stopCalls(); + assert.fail(`Client NACKED ${typeUrl} resource with message ${responseState.errorMessage}`); + } + }) + client = XdsTestClient.createFromServer('listener1', xdsServer); + client.sendOneCall(done); + }, reason => done(reason)); + }); + +}); diff --git a/packages/grpc-js-xds/test/test-federation.ts b/packages/grpc-js-xds/test/test-federation.ts new file mode 100644 index 000000000..5d4099bbf --- /dev/null +++ b/packages/grpc-js-xds/test/test-federation.ts @@ -0,0 +1,209 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import { Backend } from "./backend"; +import { XdsTestClient } from "./client"; +import { FakeEdsCluster, FakeRouteGroup } from "./framework"; +import { XdsServer } from "./xds-server"; +import assert = require("assert"); + +/* Test cases in this file are derived from examples in the xDS federation proposal + * https://github.com/grpc/proposal/blob/master/A47-xds-federation.md */ +describe('Federation', () => { + let xdsServers: XdsServer[] = []; + let xdsClient: XdsTestClient; + afterEach(() => { + xdsClient?.close(); + for (const server of xdsServers) { + server.shutdownServer(); + } + xdsServers = []; + }); + describe('Bootstrap Config Contains No New Fields', () => { + let bootstrap: string; + beforeEach((done) => { + const xdsServer = new XdsServer(); + xdsServers.push(xdsServer); + xdsServer.startServer(error => { + if (error) { + done(error); + return; + } + const cluster = new FakeEdsCluster('cluster1', 'endpoint1', [{backends: [new Backend()], locality:{region: 'region1'}}]); + const routeGroup = new FakeRouteGroup('server.example.com', 'route1', [{cluster: cluster}]); + routeGroup.startAllBackends().then(() => { + xdsServer.setEdsResource(cluster.getEndpointConfig()); + xdsServer.setCdsResource(cluster.getClusterConfig()); + xdsServer.setRdsResource(routeGroup.getRouteConfiguration()); + xdsServer.setLdsResource(routeGroup.getListener()); + const bootstrapInfo = { + xds_servers: [xdsServer.getBootstrapServerConfig()], + node: { + id: 'test', + locality: {} + } + }; + bootstrap = JSON.stringify(bootstrapInfo); + done(); + }); + }); + }); + it('Should accept an old-style name', (done) => { + xdsClient = new XdsTestClient('xds:server.example.com', bootstrap); + // There is only one server, so a successful request must go to that server + xdsClient.sendOneCall(done); + }); + it('Should reject a new-style name', (done) => { + xdsClient = new XdsTestClient('xds://xds.authority.com/server.example.com', bootstrap); + xdsClient.sendOneCall(error => { + assert(error); + done(); + }); + }); + }); + describe('New-Style Names on gRPC Client', () => { + let bootstrap: string; + beforeEach((done) => { + const xdsServer = new XdsServer(); + xdsServers.push(xdsServer); + xdsServer.startServer(error => { + if (error) { + done(error); + return; + } + const cluster = new FakeEdsCluster('xdstp://xds.authority.com/envoy.config.cluster.v3.Cluster/cluster1', 'xdstp://xds.authority.com/envoy.config.endpoint.v3.ClusterLoadAssignment/endpoint1', [{backends: [new Backend()], locality:{region: 'region1'}}]); + const routeGroup = new FakeRouteGroup('xdstp://xds.authority.com/envoy.config.listener.v3.Listener/server.example.com', 'xdstp://xds.authority.com/envoy.config.route.v3.RouteConfiguration/route1', [{cluster: cluster}]); + routeGroup.startAllBackends().then(() => { + xdsServer.setEdsResource(cluster.getEndpointConfig()); + xdsServer.setCdsResource(cluster.getClusterConfig()); + xdsServer.setRdsResource(routeGroup.getRouteConfiguration()); + xdsServer.setLdsResource(routeGroup.getListener()); + const bootstrapInfo = { + xds_servers: [xdsServer.getBootstrapServerConfig()], + node: { + id: 'test', + locality: {} + }, + "client_default_listener_resource_name_template": "xdstp://xds.authority.com/envoy.config.listener.v3.Listener/%s", + "authorities": { + "xds.authority.com": { + } + } + }; + bootstrap = JSON.stringify(bootstrapInfo); + done(); + }); + }); + }); + it('Should accept a target 
with no authority', (done) => { + xdsClient = new XdsTestClient('xds:server.example.com', bootstrap); + // There is only one server, so a successful request must go to that server + xdsClient.sendOneCall(done); + }); + it('Should accept a target with a listed authority', (done) => { + xdsClient = new XdsTestClient('xds://xds.authority.com/server.example.com', bootstrap); + // There is only one server, so a successful request must go to that server + xdsClient.sendOneCall(done); + }); + }); + describe('Multiple authorities', () => { + let bootstrap: string; + let defaultRouteGroup: FakeRouteGroup; + let otherRouteGroup: FakeRouteGroup; + beforeEach((done) => { + const defaultServer = new XdsServer(); + xdsServers.push(defaultServer); + const otherServer = new XdsServer(); + xdsServers.push(otherServer); + defaultServer.startServer(error => { + if (error) { + done(error); + return; + } + otherServer.startServer(error => { + if (error) { + done(error); + return; + } + const defaultCluster = new FakeEdsCluster('xdstp://xds.authority.com/envoy.config.cluster.v3.Cluster/cluster1', 'xdstp://xds.authority.com/envoy.config.endpoint.v3.ClusterLoadAssignment/endpoint1', [{backends: [new Backend()], locality:{region: 'region1'}}]); + defaultRouteGroup = new FakeRouteGroup('xdstp://xds.authority.com/envoy.config.listener.v3.Listener/grpc/client/server.example.com?project_id=1234', 'xdstp://xds.authority.com/envoy.config.route.v3.RouteConfiguration/route1', [{cluster: defaultCluster}]); + const otherCluster = new FakeEdsCluster('xdstp://xds.other.com/envoy.config.cluster.v3.Cluster/cluster2', 'xdstp://xds.other.com/envoy.config.endpoint.v3.ClusterLoadAssignment/endpoint2', [{backends: [new Backend()], locality:{region: 'region2'}}]); + otherRouteGroup = new FakeRouteGroup('xdstp://xds.other.com/envoy.config.listener.v3.Listener/server.other.com', 'xdstp://xds.other.com/envoy.config.route.v3.RouteConfiguration/route2', [{cluster: otherCluster}]); + Promise.all([defaultRouteGroup.startAllBackends(), otherRouteGroup.startAllBackends()]).then(() => { + defaultServer.setEdsResource(defaultCluster.getEndpointConfig()); + defaultServer.setCdsResource(defaultCluster.getClusterConfig()); + defaultServer.setRdsResource(defaultRouteGroup.getRouteConfiguration()); + defaultServer.setLdsResource(defaultRouteGroup.getListener()); + otherServer.setEdsResource(otherCluster.getEndpointConfig()); + otherServer.setCdsResource(otherCluster.getClusterConfig()); + otherServer.setRdsResource(otherRouteGroup.getRouteConfiguration()); + otherServer.setLdsResource(otherRouteGroup.getListener()); + const bootstrapInfo = { + xds_servers: [defaultServer.getBootstrapServerConfig()], + node: { + id: 'test', + locality: {} + }, + + // Resource name template for xds: target URIs with no authority. + "client_default_listener_resource_name_template": "xdstp://xds.authority.com/envoy.config.listener.v3.Listener/grpc/client/%s?project_id=1234", + + // Resource name template for xDS-enabled gRPC servers. + "server_listener_resource_name_template": "xdstp://xds.authority.com/envoy.config.listener.v3.Listener/grpc/server/%s?project_id=1234", + + // Authorities map. 
+ "authorities": { + "xds.authority.com": { + "client_listener_resource_name_template": "xdstp://xds.authority.com/envoy.config.listener.v3.Listener/grpc/client/%s?project_id=1234" + }, + "xds.other.com": { + "xds_servers": [otherServer.getBootstrapServerConfig()] + } + } + }; + bootstrap = JSON.stringify(bootstrapInfo); + done(); + }); + }); + }); + }); + it('Should accept a name with no authority', (done) => { + xdsClient = new XdsTestClient('xds:server.example.com', bootstrap); + xdsClient.sendOneCall(error => { + assert.ifError(error); + assert(defaultRouteGroup.haveAllBackendsReceivedTraffic()); + done(); + }); + }); + it('Should accept a with an authority that has no server configured', (done) => { + xdsClient = new XdsTestClient('xds://xds.authority.com/server.example.com', bootstrap); + xdsClient.sendOneCall(error => { + assert.ifError(error); + assert(defaultRouteGroup.haveAllBackendsReceivedTraffic()); + done(); + }); + }); + it('Should accept a name with an authority that has no template configured', (done) => { + xdsClient = new XdsTestClient('xds://xds.other.com/server.other.com', bootstrap); + xdsClient.sendOneCall(error => { + assert.ifError(error); + assert(otherRouteGroup.haveAllBackendsReceivedTraffic()); + done(); + }); + }); + }); +}); diff --git a/packages/grpc-js-xds/test/test-listener-resource-name.ts b/packages/grpc-js-xds/test/test-listener-resource-name.ts new file mode 100644 index 000000000..2aeb2d3d6 --- /dev/null +++ b/packages/grpc-js-xds/test/test-listener-resource-name.ts @@ -0,0 +1,125 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +import { BootstrapInfo, Node, validateBootstrapConfig } from "../src/xds-bootstrap"; +import { experimental } from "@grpc/grpc-js"; +import * as assert from 'assert'; +import GrpcUri = experimental.GrpcUri; +import { getListenerResourceName } from "../src/resolver-xds"; + +const testNode: Node = { + id: 'test', + locality: {} +}; + +/* Test cases in this file are derived from examples in the xDS federation proposal + * https://github.com/grpc/proposal/blob/master/A47-xds-federation.md */ +describe('Listener resource name evaluation', () => { + describe('No new bootstrap fields', () => { + const bootstrap = validateBootstrapConfig({ + node: testNode, + xds_servers: [] + }); + it('xds:server.example.com', () => { + const target: GrpcUri = { + scheme: 'xds', + path: 'server.example.com' + }; + assert.strictEqual(getListenerResourceName(bootstrap, target), 'server.example.com'); + }); + it('xds://xds.authority.com/server.example.com', () => { + const target: GrpcUri = { + scheme: 'xds', + authority: 'xds.authority.com', + path: 'server.example.com' + }; + assert.throws(() => getListenerResourceName(bootstrap, target), /xds.authority.com/); + }); + }); + describe('New-style names', () => { + const bootstrap = validateBootstrapConfig({ + node: testNode, + xds_servers: [], + client_default_listener_resource_name_template: 'xdstp://xds.authority.com/envoy.config.listener.v3.Listener/%s', + authorities: { + 'xds.authority.com': {} + } + }); + it('xds:server.example.com', () => { + const target: GrpcUri = { + scheme: 'xds', + path: 'server.example.com' + }; + assert.strictEqual(getListenerResourceName(bootstrap, target), 'xdstp://xds.authority.com/envoy.config.listener.v3.Listener/server.example.com'); + }); + it('xds://xds.authority.com/server.example.com', () => { + const target: GrpcUri = { + scheme: 'xds', + authority: 'xds.authority.com', + path: 'server.example.com' + }; + assert.strictEqual(getListenerResourceName(bootstrap, target), 'xdstp://xds.authority.com/envoy.config.listener.v3.Listener/server.example.com'); + }); + }); + describe('Multiple authorities', () => { + const bootstrap = validateBootstrapConfig({ + node: testNode, + xds_servers: [{ + "server_uri": "xds-server.authority.com", + "channel_creds": [ { "type": "google_default" } ] + }], + client_default_listener_resource_name_template: 'xdstp://xds.authority.com/envoy.config.listener.v3.Listener/grpc/client/%s?project_id=1234', + authorities: { + "xds.authority.com": { + "client_listener_resource_name_template": "xdstp://xds.authority.com/envoy.config.listener.v3.Listener/grpc/client/%s?project_id=1234" + }, + + "xds.other.com": { + "xds_servers": [ + { + "server_uri": "xds-server.other.com", + "channel_creds": [ { "type": "google_default" } ] + } + ] + } + } + }); + it('xds:server.example.com', () => { + const target: GrpcUri = { + scheme: 'xds', + path: 'server.example.com' + }; + assert.strictEqual(getListenerResourceName(bootstrap, target), 'xdstp://xds.authority.com/envoy.config.listener.v3.Listener/grpc/client/server.example.com?project_id=1234'); + }); + it('xds://xds.authority.com/server.example.com', () => { + const target: GrpcUri = { + scheme: 'xds', + authority: 'xds.authority.com', + path: 'server.example.com' + }; + assert.strictEqual(getListenerResourceName(bootstrap, target), 'xdstp://xds.authority.com/envoy.config.listener.v3.Listener/grpc/client/server.example.com?project_id=1234'); + }); + it('xds://xds.other.com/server.other.com', () => { + const target: GrpcUri = { + scheme: 'xds', + authority: 
'xds.other.com', + path: 'server.other.com' + }; + assert.strictEqual(getListenerResourceName(bootstrap, target), 'xdstp://xds.other.com/envoy.config.listener.v3.Listener/server.other.com'); + }); + }); +}); diff --git a/packages/grpc-js-xds/test/test-nack.ts b/packages/grpc-js-xds/test/test-nack.ts new file mode 100644 index 000000000..ce6b6f45b --- /dev/null +++ b/packages/grpc-js-xds/test/test-nack.ts @@ -0,0 +1,161 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import * as assert from 'assert'; +import { register } from "../src"; +import { Cluster } from '../src/generated/envoy/config/cluster/v3/Cluster'; +import { Backend } from "./backend"; +import { XdsTestClient } from "./client"; +import { FakeEdsCluster, FakeRouteGroup } from "./framework"; +import { XdsServer } from "./xds-server"; + +register(); + +describe('Validation errors', () => { + let xdsServer: XdsServer; + let client: XdsTestClient; + beforeEach(done => { + xdsServer = new XdsServer(); + xdsServer.startServer(error => { + done(error); + }); + }); + afterEach(() => { + client?.close(); + xdsServer?.shutdownServer(); + }); + it('Should continue to use a valid resource after receiving an invalid EDS update', done => { + const cluster = new FakeEdsCluster('cluster1', 'endpoint1', [{backends: [new Backend()], locality: {region: 'region1'}}]); + const routeGroup = new FakeRouteGroup('listener1', 'route1', [{cluster: cluster}]); + routeGroup.startAllBackends().then(() => { + xdsServer.setEdsResource(cluster.getEndpointConfig()); + xdsServer.setCdsResource(cluster.getClusterConfig()); + xdsServer.setRdsResource(routeGroup.getRouteConfiguration()); + xdsServer.setLdsResource(routeGroup.getListener()); + client = XdsTestClient.createFromServer('listener1', xdsServer); + client.startCalls(100); + routeGroup.waitForAllBackendsToReceiveTraffic().then(() => { + // After backends receive calls, set invalid EDS resource + const invalidEdsResource = {cluster_name: cluster.getEndpointConfig().cluster_name, endpoints: [{}]}; + xdsServer.setEdsResource(invalidEdsResource); + let seenNack = false; + xdsServer.addResponseListener((typeUrl, responseState) => { + if (responseState.state === 'NACKED') { + if (seenNack) { + return; + } + seenNack = true; + routeGroup.waitForAllBackendsToReceiveTraffic().then(() => { + client.stopCalls(); + done(); + }); + } + }); + }, reason => done(reason)); + }, reason => done(reason)); + }); + it('Should continue to use a valid resource after receiving an invalid CDS update', done => { + const cluster = new FakeEdsCluster('cluster1', 'endpoint1', [{backends: [new Backend()], locality: {region: 'region1'}}]); + const routeGroup = new FakeRouteGroup('listener1', 'route1', [{cluster: cluster}]); + routeGroup.startAllBackends().then(() => { + xdsServer.setEdsResource(cluster.getEndpointConfig()); + xdsServer.setCdsResource(cluster.getClusterConfig()); + xdsServer.setRdsResource(routeGroup.getRouteConfiguration()); + 
xdsServer.setLdsResource(routeGroup.getListener()); + client = XdsTestClient.createFromServer('listener1', xdsServer); + client.startCalls(100); + routeGroup.waitForAllBackendsToReceiveTraffic().then(() => { + // After backends receive calls, set invalid CDS resource + const invalidCdsResource: Cluster = {name: cluster.getClusterConfig().name, type: 'EDS'}; + xdsServer.setCdsResource(invalidCdsResource); + let seenNack = false; + xdsServer.addResponseListener((typeUrl, responseState) => { + if (responseState.state === 'NACKED') { + if (seenNack) { + return; + } + seenNack = true; + routeGroup.waitForAllBackendsToReceiveTraffic().then(() => { + client.stopCalls(); + done(); + }); + } + }); + }, reason => done(reason)); + }, reason => done(reason)); + }); + it('Should continue to use a valid resource after receiving an invalid RDS update', done => { + const cluster = new FakeEdsCluster('cluster1', 'endpoint1', [{backends: [new Backend()], locality: {region: 'region1'}}]); + const routeGroup = new FakeRouteGroup('listener1', 'route1', [{cluster: cluster}]); + routeGroup.startAllBackends().then(() => { + xdsServer.setEdsResource(cluster.getEndpointConfig()); + xdsServer.setCdsResource(cluster.getClusterConfig()); + xdsServer.setRdsResource(routeGroup.getRouteConfiguration()); + xdsServer.setLdsResource(routeGroup.getListener()); + client = XdsTestClient.createFromServer('listener1', xdsServer); + client.startCalls(100); + routeGroup.waitForAllBackendsToReceiveTraffic().then(() => { + // After backends receive calls, set invalid RDS resource + const invalidRdsResource = {name: routeGroup.getRouteConfiguration().name, virtual_hosts: [{domains: ['**']}]}; + xdsServer.setRdsResource(invalidRdsResource); + let seenNack = false; + xdsServer.addResponseListener((typeUrl, responseState) => { + if (responseState.state === 'NACKED') { + if (seenNack) { + return; + } + seenNack = true; + routeGroup.waitForAllBackendsToReceiveTraffic().then(() => { + client.stopCalls(); + done(); + }); + } + }); + }, reason => done(reason)); + }, reason => done(reason)); + }); + it('Should continue to use a valid resource after receiving an invalid LDS update', done => { + const cluster = new FakeEdsCluster('cluster1', 'endpoint1', [{backends: [new Backend()], locality: {region: 'region1'}}]); + const routeGroup = new FakeRouteGroup('listener1', 'route1', [{cluster: cluster}]); + routeGroup.startAllBackends().then(() => { + xdsServer.setEdsResource(cluster.getEndpointConfig()); + xdsServer.setCdsResource(cluster.getClusterConfig()); + xdsServer.setRdsResource(routeGroup.getRouteConfiguration()); + xdsServer.setLdsResource(routeGroup.getListener()); + client = XdsTestClient.createFromServer('listener1', xdsServer); + client.startCalls(100); + routeGroup.waitForAllBackendsToReceiveTraffic().then(() => { + // After backends receive calls, set invalid LDS resource + const invalidLdsResource = {name: routeGroup.getListener().name}; + xdsServer.setLdsResource(invalidLdsResource); + let seenNack = false; + xdsServer.addResponseListener((typeUrl, responseState) => { + if (responseState.state === 'NACKED') { + if (seenNack) { + return; + } + seenNack = true; + routeGroup.waitForAllBackendsToReceiveTraffic().then(() => { + client.stopCalls(); + done(); + }); + } + }); + }, reason => done(reason)); + }, reason => done(reason)); + }); +}); diff --git a/packages/grpc-js-xds/test/test-ring-hash.ts b/packages/grpc-js-xds/test/test-ring-hash.ts new file mode 100644 index 000000000..20d9eeed1 --- /dev/null +++ 
b/packages/grpc-js-xds/test/test-ring-hash.ts @@ -0,0 +1,173 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import { Backend } from "./backend"; +import { XdsTestClient } from "./client"; +import { FakeEdsCluster, FakeRouteGroup } from "./framework"; +import { XdsServer } from "./xds-server"; + +import { register } from "../src"; +import assert = require("assert"); +import { Any } from "../src/generated/google/protobuf/Any"; +import { AnyExtension } from "@grpc/proto-loader"; +import { RingHash } from "../src/generated/envoy/extensions/load_balancing_policies/ring_hash/v3/RingHash"; +import { EXPERIMENTAL_RING_HASH } from "../src/environment"; + +register(); + +describe('Ring hash LB policy', () => { + let xdsServer: XdsServer; + let client: XdsTestClient; + beforeEach(done => { + xdsServer = new XdsServer(); + xdsServer.startServer(error => { + done(error); + }); + }); + afterEach(() => { + client?.close(); + xdsServer?.shutdownServer(); + }); + it('Should route requests to the single backend with the old lbPolicy field', function(done) { + if (!EXPERIMENTAL_RING_HASH) { + this.skip(); + } + const cluster = new FakeEdsCluster('cluster1', 'endpoint1', [{backends: [new Backend()], locality:{region: 'region1'}}], 'RING_HASH'); + const routeGroup = new FakeRouteGroup('listener1', 'route1', [{cluster: cluster}]); + routeGroup.startAllBackends().then(() => { + xdsServer.setEdsResource(cluster.getEndpointConfig()); + xdsServer.setCdsResource(cluster.getClusterConfig()); + xdsServer.setRdsResource(routeGroup.getRouteConfiguration()); + xdsServer.setLdsResource(routeGroup.getListener()); + xdsServer.addResponseListener((typeUrl, responseState) => { + if (responseState.state === 'NACKED') { + client.stopCalls(); + assert.fail(`Client NACKED ${typeUrl} resource with message ${responseState.errorMessage}`); + } + }) + client = XdsTestClient.createFromServer('listener1', xdsServer); + client.sendOneCall(done); + }, reason => done(reason)); + }); + it('Should route requests to the single backend with the new load_balancing_policy field', function(done) { + if (!EXPERIMENTAL_RING_HASH) { + this.skip(); + } + const lbPolicy: AnyExtension & RingHash = { + '@type': 'type.googleapis.com/envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash', + hash_function: 'XX_HASH' + }; + const cluster = new FakeEdsCluster('cluster1', 'endpoint1', [{backends: [new Backend()], locality:{region: 'region1'}}], lbPolicy); + const routeGroup = new FakeRouteGroup('listener1', 'route1', [{cluster: cluster}]); + routeGroup.startAllBackends().then(() => { + xdsServer.setEdsResource(cluster.getEndpointConfig()); + xdsServer.setCdsResource(cluster.getClusterConfig()); + xdsServer.setRdsResource(routeGroup.getRouteConfiguration()); + xdsServer.setLdsResource(routeGroup.getListener()); + xdsServer.addResponseListener((typeUrl, responseState) => { + if (responseState.state === 'NACKED') { + client.stopCalls(); + assert.fail(`Client NACKED ${typeUrl} resource with 
message ${responseState.errorMessage}`); + } + }) + client = XdsTestClient.createFromServer('listener1', xdsServer); + client.sendOneCall(done); + }, reason => done(reason)); + }); + it('Should route all identical requests to the same backend', function(done) { + if (!EXPERIMENTAL_RING_HASH) { + this.skip(); + } + const backend1 = new Backend(); + const backend2 = new Backend() + const cluster = new FakeEdsCluster('cluster1', 'endpoint1', [{backends: [backend1, backend2], locality:{region: 'region1'}}], 'RING_HASH'); + const routeGroup = new FakeRouteGroup('listener1', 'route1', [{cluster: cluster}]); + routeGroup.startAllBackends().then(() => { + xdsServer.setEdsResource(cluster.getEndpointConfig()); + xdsServer.setCdsResource(cluster.getClusterConfig()); + xdsServer.setRdsResource(routeGroup.getRouteConfiguration()); + xdsServer.setLdsResource(routeGroup.getListener()); + xdsServer.addResponseListener((typeUrl, responseState) => { + if (responseState.state === 'NACKED') { + client.stopCalls(); + assert.fail(`Client NACKED ${typeUrl} resource with message ${responseState.errorMessage}`); + } + }) + client = XdsTestClient.createFromServer('listener1', xdsServer); + client.sendNCalls(10, error => { + assert.ifError(error); + assert((backend1.getCallCount() === 0) !== (backend2.getCallCount() === 0)); + done(); + }) + }, reason => done(reason)); + }); + it('Should fallback to a second backend if the first one goes down', function(done) { + if (!EXPERIMENTAL_RING_HASH) { + this.skip(); + } + const backends = [new Backend(), new Backend(), new Backend()]; + const cluster = new FakeEdsCluster('cluster1', 'endpoint1', [{backends: backends, locality:{region: 'region1'}}], 'RING_HASH'); + const routeGroup = new FakeRouteGroup('listener1', 'route1', [{cluster: cluster}]); + routeGroup.startAllBackends().then(() => { + xdsServer.setEdsResource(cluster.getEndpointConfig()); + xdsServer.setCdsResource(cluster.getClusterConfig()); + xdsServer.setRdsResource(routeGroup.getRouteConfiguration()); + xdsServer.setLdsResource(routeGroup.getListener()); + xdsServer.addResponseListener((typeUrl, responseState) => { + if (responseState.state === 'NACKED') { + client.stopCalls(); + assert.fail(`Client NACKED ${typeUrl} resource with message ${responseState.errorMessage}`); + } + }) + client = XdsTestClient.createFromServer('listener1', xdsServer); + client.sendNCalls(100, error => { + assert.ifError(error); + let backendWithTraffic: number | null = null; + for (let i = 0; i < backends.length; i++) { + if (backendWithTraffic === null) { + if (backends[i].getCallCount() > 0) { + backendWithTraffic = i; + } + } else { + assert.strictEqual(backends[i].getCallCount(), 0, `Backends ${backendWithTraffic} and ${i} both got traffic`); + } + } + assert.notStrictEqual(backendWithTraffic, null, 'No backend got traffic'); + backends[backendWithTraffic!].shutdown(error => { + assert.ifError(error); + backends[backendWithTraffic!].resetCallCount(); + client.sendNCalls(100, error => { + assert.ifError(error); + let backendWithTraffic2: number | null = null; + for (let i = 0; i < backends.length; i++) { + if (backendWithTraffic2 === null) { + if (backends[i].getCallCount() > 0) { + backendWithTraffic2 = i; + } + } else { + assert.strictEqual(backends[i].getCallCount(), 0, `Backends ${backendWithTraffic2} and ${i} both got traffic`); + } + } + assert.notStrictEqual(backendWithTraffic2, null, 'No backend got traffic'); + assert.notStrictEqual(backendWithTraffic2, backendWithTraffic, `Traffic went to the same backend 
${backendWithTraffic} after shutdown`); + done(); + }); + }); + }); + }, reason => done(reason)); + }) +}); diff --git a/packages/grpc-js-xds/test/xds-server.ts b/packages/grpc-js-xds/test/xds-server.ts new file mode 100644 index 000000000..d8500c836 --- /dev/null +++ b/packages/grpc-js-xds/test/xds-server.ts @@ -0,0 +1,355 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import { ServerDuplexStream, Server, UntypedServiceImplementation, ServerCredentials, loadPackageDefinition } from "@grpc/grpc-js"; +import { AnyExtension, loadSync } from "@grpc/proto-loader"; +import { EventEmitter } from "stream"; +import { Cluster } from "../src/generated/envoy/config/cluster/v3/Cluster"; +import { ClusterLoadAssignment } from "../src/generated/envoy/config/endpoint/v3/ClusterLoadAssignment"; +import { Listener } from "../src/generated/envoy/config/listener/v3/Listener"; +import { RouteConfiguration } from "../src/generated/envoy/config/route/v3/RouteConfiguration"; +import { AggregatedDiscoveryServiceHandlers } from "../src/generated/envoy/service/discovery/v3/AggregatedDiscoveryService"; +import { DiscoveryRequest__Output } from "../src/generated/envoy/service/discovery/v3/DiscoveryRequest"; +import { DiscoveryResponse } from "../src/generated/envoy/service/discovery/v3/DiscoveryResponse"; +import { Any } from "../src/generated/google/protobuf/Any"; +import { LDS_TYPE_URL, RDS_TYPE_URL, CDS_TYPE_URL, EDS_TYPE_URL, LdsTypeUrl, RdsTypeUrl, CdsTypeUrl, EdsTypeUrl, AdsTypeUrl } from "../src/resources" +import * as adsTypes from '../src/generated/ads'; +import * as lrsTypes from '../src/generated/lrs'; +import { LoadStatsRequest__Output } from "../src/generated/envoy/service/load_stats/v3/LoadStatsRequest"; +import { LoadStatsResponse } from "../src/generated/envoy/service/load_stats/v3/LoadStatsResponse"; + +const loadedProtos = loadPackageDefinition(loadSync( + [ + 'envoy/service/discovery/v3/ads.proto', + 'envoy/service/load_stats/v3/lrs.proto', + 'envoy/config/listener/v3/listener.proto', + 'envoy/config/route/v3/route.proto', + 'envoy/config/cluster/v3/cluster.proto', + 'envoy/config/endpoint/v3/endpoint.proto', + 'envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto', + 'envoy/extensions/clusters/aggregate/v3/cluster.proto', + 'envoy/extensions/load_balancing_policies/round_robin/v3/round_robin.proto', + 'envoy/extensions/load_balancing_policies/wrr_locality/v3/wrr_locality.proto', + 'envoy/extensions/load_balancing_policies/ring_hash/v3/ring_hash.proto', + 'envoy/extensions/load_balancing_policies/pick_first/v3/pick_first.proto', + 'xds/type/v3/typed_struct.proto' + ], + { + keepCase: true, + longs: String, + enums: String, + defaults: true, + oneofs: true, + json: true, + includeDirs: [ + // Paths are relative to src/build + __dirname + '/../../deps/envoy-api/', + __dirname + '/../../deps/xds/', + __dirname + '/../../deps/googleapis/', + __dirname + '/../../deps/protoc-gen-validate/', + ], + })) as 
unknown as adsTypes.ProtoGrpcType & lrsTypes.ProtoGrpcType;
+
+type AdsInputType<T extends AdsTypeUrl> = T extends EdsTypeUrl
+ ? ClusterLoadAssignment
+ : T extends CdsTypeUrl
+ ? Cluster
+ : T extends RdsTypeUrl
+ ? RouteConfiguration
+ : Listener;
+
+const ADS_TYPE_URLS = new Set<string>([LDS_TYPE_URL, RDS_TYPE_URL, CDS_TYPE_URL, EDS_TYPE_URL]);
+
+interface ResponseState {
+ state: 'ACKED' | 'NACKED';
+ errorMessage?: string;
+}
+
+interface ResponseListener {
+ (typeUrl: AdsTypeUrl, responseState: ResponseState): void;
+}
+
+type ResourceAny<T extends AdsTypeUrl> = AdsInputType<T> & {'@type': T};
+
+interface ResourceState<T extends AdsTypeUrl> {
+ resource?: ResourceAny<T>;
+ resourceTypeVersion: number;
+ subscriptions: Set<string>;
+}
+
+interface ResourceTypeState<T extends AdsTypeUrl> {
+ resourceTypeVersion: number;
+ /**
+ * Key type is type URL
+ */
+ resourceNameMap: Map<string, ResourceState<T>>;
+}
+
+interface ResourceMap {
+ [EDS_TYPE_URL]: ResourceTypeState<EdsTypeUrl>;
+ [CDS_TYPE_URL]: ResourceTypeState<CdsTypeUrl>;
+ [RDS_TYPE_URL]: ResourceTypeState<RdsTypeUrl>;
+ [LDS_TYPE_URL]: ResourceTypeState<LdsTypeUrl>;
+}
+
+function isAdsTypeUrl(value: string): value is AdsTypeUrl {
+ return ADS_TYPE_URLS.has(value);
+}
+
+export class XdsServer {
+ private resourceMap: ResourceMap = {
+ [EDS_TYPE_URL]: {
+ resourceTypeVersion: 0,
+ resourceNameMap: new Map()
+ },
+ [CDS_TYPE_URL]: {
+ resourceTypeVersion: 0,
+ resourceNameMap: new Map()
+ },
+ [RDS_TYPE_URL]: {
+ resourceTypeVersion: 0,
+ resourceNameMap: new Map()
+ },
+ [LDS_TYPE_URL]: {
+ resourceTypeVersion: 0,
+ resourceNameMap: new Map()
+ },
+ };
+ private responseListeners = new Set<ResponseListener>();
+ private resourceTypesToIgnore = new Set<AdsTypeUrl>();
+ private clients = new Map<string, ServerDuplexStream<DiscoveryRequest__Output, DiscoveryResponse>>();
+ private server: Server | null = null;
+ private port: number | null = null;
+
+ addResponseListener(listener: ResponseListener) {
+ this.responseListeners.add(listener);
+ }
+
+ removeResponseListener(listener: ResponseListener) {
+ this.responseListeners.delete(listener);
+ }
+
+ setResource<T extends AdsTypeUrl>(resource: ResourceAny<T>, name: string) {
+ const resourceTypeState = this.resourceMap[resource["@type"]] as ResourceTypeState<T>;
+ resourceTypeState.resourceTypeVersion += 1;
+ let resourceState: ResourceState<T> | undefined = resourceTypeState.resourceNameMap.get(name);
+ if (!resourceState) {
+ resourceState = {
+ resourceTypeVersion: 0,
+ subscriptions: new Set()
+ };
+ resourceTypeState.resourceNameMap.set(name, resourceState);
+ }
+ resourceState.resourceTypeVersion = resourceTypeState.resourceTypeVersion;
+ resourceState.resource = resource;
+ this.sendResourceUpdates(resource['@type'], resourceState.subscriptions, new Set([name]));
+ }
+
+ setLdsResource(resource: Listener) {
+ this.setResource({...resource, '@type': LDS_TYPE_URL}, resource.name!);
+ }
+
+ setRdsResource(resource: RouteConfiguration) {
+ this.setResource({...resource, '@type': RDS_TYPE_URL}, resource.name!);
+ }
+
+ setCdsResource(resource: Cluster) {
+ this.setResource({...resource, '@type': CDS_TYPE_URL}, resource.name!);
+ }
+
+ setEdsResource(resource: ClusterLoadAssignment) {
+ this.setResource({...resource, '@type': EDS_TYPE_URL}, resource.cluster_name!);
+ }
+
+ unsetResource<T extends AdsTypeUrl>(typeUrl: T, name: string) {
+ const resourceTypeState = this.resourceMap[typeUrl] as ResourceTypeState<T>;
+ resourceTypeState.resourceTypeVersion += 1;
+ let resourceState: ResourceState<T> | undefined = resourceTypeState.resourceNameMap.get(name);
+ if (resourceState) {
+ resourceState.resourceTypeVersion = resourceTypeState.resourceTypeVersion;
+ delete resourceState.resource;
+ this.sendResourceUpdates(typeUrl, resourceState.subscriptions, new Set([name]));
+ }
+ }
+
+ ignoreResourceType(typeUrl: AdsTypeUrl) {
+ this.resourceTypesToIgnore.add(typeUrl);
+ }
+
+ private sendResourceUpdates<T extends AdsTypeUrl>(typeUrl: T, clients: Set<string>, includeResources: Set<string>) {
+ const resourceTypeState = this.resourceMap[typeUrl] as ResourceTypeState<T>;
+ const clientResources = new Map<string, Any[]>();
+ for (const [resourceName, resourceState] of resourceTypeState.resourceNameMap) {
+ /* For RDS and EDS, only send updates for the listed updated resources.
+ * Otherwise include all resources. */
+ if ((typeUrl === RDS_TYPE_URL || typeUrl === EDS_TYPE_URL) && !includeResources.has(resourceName)) {
+ continue;
+ }
+ if (!resourceState.resource) {
+ continue;
+ }
+ for (const clientName of clients) {
+ if (!resourceState.subscriptions.has(clientName)) {
+ continue;
+ }
+ let resourcesList = clientResources.get(clientName);
+ if (!resourcesList) {
+ resourcesList = [];
+ clientResources.set(clientName, resourcesList);
+ }
+ resourcesList.push(resourceState.resource);
+ }
+ }
+ for (const [clientName, resourceList] of clientResources) {
+ this.clients.get(clientName)?.write({
+ resources: resourceList,
+ version_info: resourceTypeState.resourceTypeVersion.toString(),
+ nonce: resourceTypeState.resourceTypeVersion.toString(),
+ type_url: typeUrl
+ });
+ }
+ }
+
+ private updateResponseListeners(typeUrl: AdsTypeUrl, responseState: ResponseState) {
+ for (const listener of this.responseListeners) {
+ listener(typeUrl, responseState);
+ }
+ }
+
+ private maybeSubscribe<T extends AdsTypeUrl>(typeUrl: T, client: string, resourceName: string): boolean {
+ const resourceTypeState = this.resourceMap[typeUrl] as ResourceTypeState<T>;
+ let resourceState = resourceTypeState.resourceNameMap.get(resourceName);
+ if (!resourceState) {
+ resourceState = {
+ resourceTypeVersion: 0,
+ subscriptions: new Set()
+ };
+ resourceTypeState.resourceNameMap.set(resourceName, resourceState);
+ }
+ const newlySubscribed = !resourceState.subscriptions.has(client);
+ resourceState.subscriptions.add(client);
+ return newlySubscribed;
+ }
+
+ private handleUnsubscriptions(typeUrl: AdsTypeUrl, client: string, requestedResourceNames?: Set<string>) {
+ const resourceTypeState = this.resourceMap[typeUrl];
+ for (const [resourceName, resourceState] of resourceTypeState.resourceNameMap) {
+ if (!requestedResourceNames || !requestedResourceNames.has(resourceName)) {
+ resourceState.subscriptions.delete(client);
+ if (!resourceState.resource && resourceState.subscriptions.size === 0) {
+ resourceTypeState.resourceNameMap.delete(resourceName)
+ }
+ }
+ }
+ }
+
+ private handleRequest(clientName: string, request: DiscoveryRequest__Output) {
+ if (!isAdsTypeUrl(request.type_url)) {
+ console.error(`Received ADS request with unsupported type_url ${request.type_url}`);
+ return;
+ }
+ const clientResourceVersion = request.version_info === '' ? 0 : Number.parseInt(request.version_info);
+ if (request.error_detail) {
+ this.updateResponseListeners(request.type_url, {state: 'NACKED', errorMessage: request.error_detail.message});
+ } else {
+ this.updateResponseListeners(request.type_url, {state: 'ACKED'});
+ }
+ const requestedResourceNames = new Set(request.resource_names);
+ const resourceTypeState = this.resourceMap[request.type_url];
+ const updatedResources = new Set<string>();
+ for (const resourceName of requestedResourceNames) {
+ if (this.maybeSubscribe(request.type_url, clientName, resourceName) || resourceTypeState.resourceNameMap.get(resourceName)!.resourceTypeVersion > clientResourceVersion) {
+ updatedResources.add(resourceName);
+ }
+ }
+ this.handleUnsubscriptions(request.type_url, clientName, requestedResourceNames);
+ if (updatedResources.size > 0) {
+ this.sendResourceUpdates(request.type_url, new Set([clientName]), updatedResources);
+ }
+ }
+
+ StreamAggregatedResources(call: ServerDuplexStream<DiscoveryRequest__Output, DiscoveryResponse>) {
+ const clientName = call.getPeer();
+ this.clients.set(clientName, call);
+ call.on('data', (request: DiscoveryRequest__Output) => {
+ this.handleRequest(clientName, request);
+ });
+ call.on('end', () => {
+ this.clients.delete(clientName);
+ for (const typeUrl of ADS_TYPE_URLS) {
+ this.handleUnsubscriptions(typeUrl as AdsTypeUrl, clientName);
+ }
+ call.end();
+ });
+ }
+
+ StreamLoadStats(call: ServerDuplexStream<LoadStatsRequest__Output, LoadStatsResponse>) {
+ const statsResponse = {load_reporting_interval: {seconds: 30}};
+ call.write(statsResponse);
+ call.on('data', (request: LoadStatsRequest__Output) => {
+ call.write(statsResponse);
+ });
+ call.on('end', () => {
+ call.end();
+ });
+ }
+
+ startServer(callback: (error: Error | null, port: number) => void) {
+ if (this.server) {
+ return;
+ }
+ const server = new Server();
+ server.addService(loadedProtos.envoy.service.discovery.v3.AggregatedDiscoveryService.service, this as unknown as UntypedServiceImplementation);
+ server.addService(loadedProtos.envoy.service.load_stats.v3.LoadReportingService.service, this as unknown as UntypedServiceImplementation);
+ server.bindAsync('localhost:0', ServerCredentials.createInsecure(), (error, port) => {
+ if (!error) {
+ this.server = server;
+ this.port = port;
+ server.start();
+ }
+ callback(error, port);
+ });
+ }
+
+ shutdownServer() {
+ this.server?.forceShutdown();
+ }
+
+ getBootstrapServerConfig() {
+ if (this.port === null) {
+ throw new Error('Bootstrap info unavailable; server not started');
+ }
+ return {
+ server_uri: `localhost:${this.port}`,
+ channel_creds: [{type: 'insecure'}]
+ };
+ }
+
+ getBootstrapInfoString(): string {
+ if (this.port === null) {
+ throw new Error('Bootstrap info unavailable; server not started');
+ }
+ const bootstrapInfo = {
+ xds_servers: [this.getBootstrapServerConfig()],
+ node: {
+ id: 'test',
+ locality: {}
+ }
+ }
+ return JSON.stringify(bootstrapInfo);
+ }
+}
diff --git a/packages/grpc-js-xds/tsconfig.json b/packages/grpc-js-xds/tsconfig.json
index c121a5f6d..24212dfc2 100644
--- a/packages/grpc-js-xds/tsconfig.json
+++ b/packages/grpc-js-xds/tsconfig.json
@@ -3,8 +3,8 @@
"compilerOptions": {
"rootDir": ".",
"outDir": "build",
- "target": "es2017",
- "lib": ["es2017"],
+ "target": "es2020",
+ "lib": ["es2020"],
"module": "commonjs",
"incremental": true
},
diff --git a/packages/grpc-js/.eslintrc b/packages/grpc-js/.eslintrc
index 64585682c..9a72b31de 100644
--- a/packages/grpc-js/.eslintrc
+++ b/packages/grpc-js/.eslintrc
@@ -1,11 +1,62 @@
{
"root": true,
- "extends": "./node_modules/gts",
+
+ "extends": [
+
"eslint:recommended", + "plugin:node/recommended", + "plugin:@typescript-eslint/recommended", + "plugin:prettier/recommended" + ], + "plugins": ["node", "prettier", "@typescript-eslint"], + "parser": "@typescript-eslint/parser", + "parserOptions": { + "ecmaVersion": 2018, + "sourceType": "module" + }, + "ignorePatterns": ["**/generated/**", "**/node_modules/**", "**/build/**"], "rules": { - "node/no-unpublished-import": ["error", { + "node/no-unpublished-import": [ + "error", + { "tryExtensions": [".ts", ".js", ".json", ".node"] - }], + } + ], "@typescript-eslint/no-unused-vars": "off", - "node/no-unpublished-require": "off" + "node/no-unpublished-require": "off", + "prettier/prettier": "error", + "block-scoped-var": "error", + "eqeqeq": "error", + "no-var": "error", + "prefer-const": "error", + "no-case-declarations": "warn", + "no-restricted-properties": [ + "error", + { + "object": "describe", + "property": "only" + }, + { + "object": "it", + "property": "only" + } + ], + + "@typescript-eslint/no-non-null-assertion": "off", + "@typescript-eslint/no-use-before-define": "off", + "@typescript-eslint/no-warning-comments": "off", + "@typescript-eslint/no-empty-function": "off", + "@typescript-eslint/no-var-requires": "off", + "@typescript-eslint/explicit-function-return-type": "off", + "@typescript-eslint/explicit-module-boundary-types": "off", + "@typescript-eslint/ban-types": "off", + "@typescript-eslint/camelcase": "off", + "@typescript-eslint/no-explicit-any": "off", + "node/no-missing-import": "off", + "node/no-empty-function": "off", + "node/no-unsupported-features/es-syntax": "off", + "node/no-missing-require": "off", + "node/shebang": "off", + "no-dupe-class-members": "off", + "require-atomic-updates": "off" } } diff --git a/packages/grpc-js/README.md b/packages/grpc-js/README.md index 4bb4da024..f3b682f3c 100644 --- a/packages/grpc-js/README.md +++ b/packages/grpc-js/README.md @@ -19,12 +19,14 @@ Documentation specifically for the `@grpc/grpc-js` package is currently not avai - Servers - Streaming - Metadata -- Partial compression support: clients can decompress response messages +- Partial compression support: clients can compress and decompress messages, and servers can decompress request messages - Pick first and round robin load balancing policies - Client Interceptors - Connection Keepalives - HTTP Connect support (proxies) +If you need a feature from the `grpc` package that is not provided by the `@grpc/grpc-js`, please file a feature request with that information. + This library does not directly handle `.proto` files. To use `.proto` files with this library we recommend using the `@grpc/proto-loader` package. ## Migrating from [`grpc`](https://www.npmjs.com/package/grpc) @@ -34,6 +36,41 @@ This library does not directly handle `.proto` files. To use `.proto` files with - If you are currently loading `.proto` files using `grpc.load`, that function is not available in this library. You should instead load your `.proto` files using `@grpc/proto-loader` and load the resulting package definition objects into `@grpc/grpc-js` using `grpc.loadPackageDefinition`. - If you are currently loading packages generated by `grpc-tools`, you should instead generate your files using the `generate_package_definition` option in `grpc-tools`, then load the object exported by the generated file into `@grpc/grpc-js` using `grpc.loadPackageDefinition`. - If you have a server and you are using `Server#bind` to bind ports, you will need to use `Server#bindAsync` instead. 
+- If you are using any channel options supported in `grpc` but not supported in `@grpc/grpc-js`, you may need to adjust your code to handle the different behavior. Refer to [the list of supported options](#supported-channel-options) below.
+- Refer to the [detailed package comparison](https://github.com/grpc/grpc-node/blob/master/PACKAGE-COMPARISON.md) for more details on the differences between `grpc` and `@grpc/grpc-js`.
+
+## Supported Channel Options
+Many channel arguments supported in `grpc` are not supported in `@grpc/grpc-js`. The channel arguments supported by `@grpc/grpc-js` are:
+ - `grpc.ssl_target_name_override`
+ - `grpc.primary_user_agent`
+ - `grpc.secondary_user_agent`
+ - `grpc.default_authority`
+ - `grpc.keepalive_time_ms`
+ - `grpc.keepalive_timeout_ms`
+ - `grpc.keepalive_permit_without_calls`
+ - `grpc.service_config`
+ - `grpc.max_concurrent_streams`
+ - `grpc.initial_reconnect_backoff_ms`
+ - `grpc.max_reconnect_backoff_ms`
+ - `grpc.use_local_subchannel_pool`
+ - `grpc.max_send_message_length`
+ - `grpc.max_receive_message_length`
+ - `grpc.enable_http_proxy`
+ - `grpc.default_compression_algorithm`
+ - `grpc.enable_channelz`
+ - `grpc.dns_min_time_between_resolutions_ms`
+ - `grpc.enable_retries`
+ - `grpc.max_connection_age_ms`
+ - `grpc.max_connection_age_grace_ms`
+ - `grpc.max_connection_idle_ms`
+ - `grpc.per_rpc_retry_buffer_size`
+ - `grpc.retry_buffer_size`
+ - `grpc.service_config_disable_resolution`
+ - `grpc.client_idle_timeout_ms`
+ - `grpc-node.max_session_memory`
+ - `grpc-node.tls_enable_trace`
+ - `channelOverride`
+ - `channelFactoryOverride`
## Some Notes on API Guarantees
diff --git a/packages/grpc-js/apache-notice.md b/packages/grpc-js/apache-notice.md
new file mode 100644
index 000000000..703afce80
--- /dev/null
+++ b/packages/grpc-js/apache-notice.md
@@ -0,0 +1,4 @@
+The following files contain configuration settings that were derived from [this commit](https://github.com/google/gts/commit/3b9ab6dd59691f77f5c5c632a44c6762ba4ef7c6) in the Google GTS repository:
+ - .eslintrc
+ - prettier.config.js
+ - tsconfig.json
diff --git a/packages/grpc-js/gulpfile.ts b/packages/grpc-js/gulpfile.ts
index 6d8d20943..e4e9071ff 100644
--- a/packages/grpc-js/gulpfile.ts
+++ b/packages/grpc-js/gulpfile.ts
@@ -35,14 +35,17 @@ const pkgPath = path.resolve(jsCoreDir, 'package.json');
const supportedVersionRange = require(pkgPath).engines.node;
const versionNotSupported = () => {
console.log(`Skipping grpc-js task for Node ${process.version}`);
- return () => { return Promise.resolve(); };
+ return () => {
+ return Promise.resolve();
+ };
};
const identity = (value: any): any => value;
-const checkTask = semver.satisfies(process.version, supportedVersionRange) ?
- identity : versionNotSupported;
+const checkTask = semver.satisfies(process.version, supportedVersionRange)
+ ?
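As a rough illustration of the README migration notes and the channel options listed above, the following sketch loads a `.proto` file with `@grpc/proto-loader`, passes a few of the supported channel options when constructing a client, and binds a server with `Server#bindAsync`. The proto path, package name, service name, and option values are hypothetical placeholders, not part of this change.

```ts
// Illustrative sketch only: 'example.proto', the 'example' package,
// 'ExampleService', and the option values below are hypothetical placeholders.
import * as grpc from '@grpc/grpc-js';
import * as protoLoader from '@grpc/proto-loader';

// Replacement for grpc.load(): load the .proto with @grpc/proto-loader and
// hand the resulting package definition to grpc.loadPackageDefinition.
const packageDefinition = protoLoader.loadSync('example.proto', {
  keepCase: true,
  longs: String,
  enums: String,
  defaults: true,
  oneofs: true,
});
const loadedPackage = grpc.loadPackageDefinition(packageDefinition) as any;

// A few of the channel options from the list above.
const channelOptions: grpc.ChannelOptions = {
  'grpc.keepalive_time_ms': 120000,
  'grpc.keepalive_timeout_ms': 20000,
  'grpc.max_receive_message_length': 4 * 1024 * 1024,
  'grpc.enable_retries': 1,
};

// Channel options are passed as the third client constructor argument.
const client = new loadedPackage.example.ExampleService(
  'localhost:50051',
  grpc.credentials.createInsecure(),
  channelOptions
);

// Replacement for Server#bind: bindAsync reports the bound port asynchronously.
const server = new grpc.Server();
server.bindAsync(
  '0.0.0.0:50051',
  grpc.ServerCredentials.createInsecure(),
  (error, port) => {
    if (error) {
      throw error;
    }
    console.log(`Server bound on port ${port}`);
  }
);
```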
identity + : versionNotSupported; const execNpmVerb = (verb: string, ...args: string[]) => - execa('npm', [verb, ...args], {cwd: jsCoreDir, stdio: 'inherit'}); + execa('npm', [verb, ...args], { cwd: jsCoreDir, stdio: 'inherit' }); const execNpmCommand = execNpmVerb.bind(null, 'run'); const install = checkTask(() => execNpmVerb('install', '--unsafe-perm')); @@ -64,21 +67,20 @@ const cleanAll = gulp.parallel(clean); */ const compile = checkTask(() => execNpmCommand('compile')); -const copyTestFixtures = checkTask(() => ncpP(`${jsCoreDir}/test/fixtures`, `${outDir}/test/fixtures`)); +const copyTestFixtures = checkTask(() => + ncpP(`${jsCoreDir}/test/fixtures`, `${outDir}/test/fixtures`) +); const runTests = checkTask(() => { - return gulp.src(`${outDir}/test/**/*.js`) - .pipe(mocha({reporter: 'mocha-jenkins-reporter', - require: ['ts-node/register']})); + process.env.GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION = 'true'; + return gulp.src(`${outDir}/test/**/*.js`).pipe( + mocha({ + reporter: 'mocha-jenkins-reporter', + require: ['ts-node/register'], + }) + ); }); const test = gulp.series(install, copyTestFixtures, runTests); -export { - install, - lint, - clean, - cleanAll, - compile, - test -} +export { install, lint, clean, cleanAll, compile, test }; diff --git a/packages/grpc-js/log.txt b/packages/grpc-js/log.txt deleted file mode 100644 index 7a6bbc2c8..000000000 --- a/packages/grpc-js/log.txt +++ /dev/null @@ -1,971 +0,0 @@ -{ - O: [Getter/Setter], - outDir: [Getter/Setter], - 'out-dir': [Getter/Setter], - _: [ - 'envoy/service/discovery/v2/ads.proto', - 'envoy/api/v2/listener.proto', - 'envoy/api/v2/route.proto', - 'envoy/api/v2/cluster.proto', - 'envoy/api/v2/endpoint.proto' - ], - keepCase: true, - 'keep-case': true, - longs: [Function: String], - enums: [Function: String], - defaults: true, - oneofs: true, - json: true, - includeDirs: [ - 'deps/envoy-api/', - 'deps/udpa/', - 'node_modules/protobufjs/', - 'deps/googleapis/', - 'deps/protoc-gen-validate/' - ], - I: [ - 'deps/envoy-api/', - 'deps/udpa/', - 'node_modules/protobufjs/', - 'deps/googleapis/', - 'deps/protoc-gen-validate/' - ], - 'include-dirs': [ - 'deps/envoy-api/', - 'deps/udpa/', - 'node_modules/protobufjs/', - 'deps/googleapis/', - 'deps/protoc-gen-validate/' - ], - grpcLib: '../index', - 'grpc-lib': '../index', - '$0': 'node_modules/.bin/proto-loader-gen-types' -} -Processing envoy/service/discovery/v2/ads.proto -Writing src/generated//ads.d.ts -Writing src/generated//envoy/service/discovery/v2/AdsDummy.d.ts from file deps/envoy-api/envoy/service/discovery/v2/ads.proto -Writing src/generated//envoy/api/v2/DiscoveryRequest.d.ts from file deps/envoy-api/envoy/api/v2/discovery.proto -Writing src/generated//envoy/api/v2/DiscoveryResponse.d.ts from file deps/envoy-api/envoy/api/v2/discovery.proto -Writing src/generated//envoy/api/v2/DeltaDiscoveryRequest.d.ts from file deps/envoy-api/envoy/api/v2/discovery.proto -Writing src/generated//envoy/api/v2/DeltaDiscoveryResponse.d.ts from file deps/envoy-api/envoy/api/v2/discovery.proto -Writing src/generated//envoy/api/v2/Resource.d.ts from file deps/envoy-api/envoy/api/v2/discovery.proto -Writing src/generated//envoy/api/v2/core/RoutingPriority.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/RequestMethod.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/TrafficDirection.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing 
src/generated//envoy/api/v2/core/Locality.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto
[... additional removed "-Writing <Type>.d.ts from file <proto>" lines for the remaining imported types: envoy/api/v2/core (base, address, backoff, socket_option, http_uri), envoy/type (percent, semantic_version), udpa/annotations (status, migrate), validate rules, google/protobuf well-known and descriptor types, and google/rpc/Status ...]
-Processing envoy/api/v2/listener.proto
-Writing src/generated//listener.d.ts
-Writing src/generated//envoy/api/v2/Listener.d.ts from file deps/envoy-api/envoy/api/v2/listener.proto
[... additional removed "-Writing" lines for the listener proto's dependencies: envoy/api/v2/Listener subtypes, envoy/api/v2/listener, envoy/api/v2/core (base, address, backoff, socket_option, http_uri, config_source, grpc_service), envoy/api/v2/auth, envoy/api/v2/route, envoy/config/listener/v2, envoy/config/filter/accesslog/v2, envoy/type (percent, semantic_version, range, matcher, tracing/v2, metadata/v2), udpa/annotations, validate rules, google/protobuf, google/rpc, and google/api types ...]
-Processing envoy/api/v2/route.proto
-Writing src/generated//route.d.ts
-Writing src/generated//envoy/api/v2/RouteConfiguration.d.ts from file deps/envoy-api/envoy/api/v2/route.proto
-Writing src/generated//envoy/api/v2/Vhds.d.ts from file deps/envoy-api/envoy/api/v2/route.proto
[... additional removed "-Writing" lines for the route proto's dependencies: envoy/api/v2/core (config_source, base, grpc_service, backoff, http_uri, socket_option, address), envoy/api/v2/route, envoy/type (matcher, range, tracing/v2, semantic_version, percent, metadata/v2), udpa/annotations, validate rules, and google/protobuf types ...]
-Processing envoy/api/v2/cluster.proto
-Writing src/generated//cluster.d.ts
-Writing src/generated//envoy/api/v2/Cluster.d.ts from file deps/envoy-api/envoy/api/v2/cluster.proto
[... additional removed "-Writing" lines for the cluster proto's dependencies: envoy/api/v2/Cluster subtypes, envoy/api/v2/LoadBalancingPolicy, envoy/api/v2/UpstreamBindConfig, envoy/api/v2/UpstreamConnectionOptions, and envoy/api/v2/auth types ...]
-Writing src/generated//envoy/api/v2/auth/SdsSecretConfig.d.ts
from file deps/envoy-api/envoy/api/v2/auth/secret.proto -Writing src/generated//envoy/api/v2/auth/Secret.d.ts from file deps/envoy-api/envoy/api/v2/auth/secret.proto -Writing src/generated//envoy/api/v2/core/ApiVersion.d.ts from file deps/envoy-api/envoy/api/v2/core/config_source.proto -Writing src/generated//envoy/api/v2/core/ApiConfigSource.d.ts from file deps/envoy-api/envoy/api/v2/core/config_source.proto -Writing src/generated//envoy/api/v2/core/ApiConfigSource/ApiType.d.ts from file deps/envoy-api/envoy/api/v2/core/config_source.proto -Writing src/generated//envoy/api/v2/core/AggregatedConfigSource.d.ts from file deps/envoy-api/envoy/api/v2/core/config_source.proto -Writing src/generated//envoy/api/v2/core/SelfConfigSource.d.ts from file deps/envoy-api/envoy/api/v2/core/config_source.proto -Writing src/generated//envoy/api/v2/core/RateLimitSettings.d.ts from file deps/envoy-api/envoy/api/v2/core/config_source.proto -Writing src/generated//envoy/api/v2/core/ConfigSource.d.ts from file deps/envoy-api/envoy/api/v2/core/config_source.proto -Writing src/generated//envoy/api/v2/core/HealthStatus.d.ts from file deps/envoy-api/envoy/api/v2/core/health_check.proto -Writing src/generated//envoy/api/v2/core/HealthCheck.d.ts from file deps/envoy-api/envoy/api/v2/core/health_check.proto -Writing src/generated//envoy/api/v2/core/HealthCheck/Payload.d.ts from file deps/envoy-api/envoy/api/v2/core/health_check.proto -Writing src/generated//envoy/api/v2/core/HealthCheck/HttpHealthCheck.d.ts from file deps/envoy-api/envoy/api/v2/core/health_check.proto -Writing src/generated//envoy/api/v2/core/HealthCheck/TcpHealthCheck.d.ts from file deps/envoy-api/envoy/api/v2/core/health_check.proto -Writing src/generated//envoy/api/v2/core/HealthCheck/RedisHealthCheck.d.ts from file deps/envoy-api/envoy/api/v2/core/health_check.proto -Writing src/generated//envoy/api/v2/core/HealthCheck/GrpcHealthCheck.d.ts from file deps/envoy-api/envoy/api/v2/core/health_check.proto -Writing src/generated//envoy/api/v2/core/HealthCheck/CustomHealthCheck.d.ts from file deps/envoy-api/envoy/api/v2/core/health_check.proto -Writing src/generated//envoy/api/v2/core/HealthCheck/TlsOptions.d.ts from file deps/envoy-api/envoy/api/v2/core/health_check.proto -Writing src/generated//envoy/api/v2/core/TcpProtocolOptions.d.ts from file deps/envoy-api/envoy/api/v2/core/protocol.proto -Writing src/generated//envoy/api/v2/core/UpstreamHttpProtocolOptions.d.ts from file deps/envoy-api/envoy/api/v2/core/protocol.proto -Writing src/generated//envoy/api/v2/core/HttpProtocolOptions.d.ts from file deps/envoy-api/envoy/api/v2/core/protocol.proto -Writing src/generated//envoy/api/v2/core/HttpProtocolOptions/HeadersWithUnderscoresAction.d.ts from file deps/envoy-api/envoy/api/v2/core/protocol.proto -Writing src/generated//envoy/api/v2/core/Http1ProtocolOptions.d.ts from file deps/envoy-api/envoy/api/v2/core/protocol.proto -Writing src/generated//envoy/api/v2/core/Http1ProtocolOptions/HeaderKeyFormat.d.ts from file deps/envoy-api/envoy/api/v2/core/protocol.proto -Writing src/generated//envoy/api/v2/core/Http1ProtocolOptions/HeaderKeyFormat/ProperCaseWords.d.ts from file deps/envoy-api/envoy/api/v2/core/protocol.proto -Writing src/generated//envoy/api/v2/core/Http2ProtocolOptions.d.ts from file deps/envoy-api/envoy/api/v2/core/protocol.proto -Writing src/generated//envoy/api/v2/core/Http2ProtocolOptions/SettingsParameter.d.ts from file deps/envoy-api/envoy/api/v2/core/protocol.proto -Writing src/generated//envoy/api/v2/core/GrpcProtocolOptions.d.ts from 
file deps/envoy-api/envoy/api/v2/core/protocol.proto -Writing src/generated//envoy/api/v2/core/Pipe.d.ts from file deps/envoy-api/envoy/api/v2/core/address.proto -Writing src/generated//envoy/api/v2/core/SocketAddress.d.ts from file deps/envoy-api/envoy/api/v2/core/address.proto -Writing src/generated//envoy/api/v2/core/SocketAddress/Protocol.d.ts from file deps/envoy-api/envoy/api/v2/core/address.proto -Writing src/generated//envoy/api/v2/core/TcpKeepalive.d.ts from file deps/envoy-api/envoy/api/v2/core/address.proto -Writing src/generated//envoy/api/v2/core/BindConfig.d.ts from file deps/envoy-api/envoy/api/v2/core/address.proto -Writing src/generated//envoy/api/v2/core/Address.d.ts from file deps/envoy-api/envoy/api/v2/core/address.proto -Writing src/generated//envoy/api/v2/core/CidrRange.d.ts from file deps/envoy-api/envoy/api/v2/core/address.proto -Writing src/generated//envoy/api/v2/core/RoutingPriority.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/RequestMethod.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/TrafficDirection.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/Locality.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/BuildVersion.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/Extension.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/Node.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/Metadata.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/RuntimeUInt32.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/RuntimeDouble.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/RuntimeFeatureFlag.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/HeaderValue.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/HeaderValueOption.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/HeaderMap.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/DataSource.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/RetryPolicy.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/RemoteDataSource.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/AsyncDataSource.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/TransportSocket.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/RuntimeFractionalPercent.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/ControlPlane.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/GrpcService.d.ts from file deps/envoy-api/envoy/api/v2/core/grpc_service.proto -Writing src/generated//envoy/api/v2/core/GrpcService/EnvoyGrpc.d.ts from file deps/envoy-api/envoy/api/v2/core/grpc_service.proto -Writing 
src/generated//envoy/api/v2/core/GrpcService/GoogleGrpc.d.ts from file deps/envoy-api/envoy/api/v2/core/grpc_service.proto -Writing src/generated//envoy/api/v2/core/GrpcService/GoogleGrpc/SslCredentials.d.ts from file deps/envoy-api/envoy/api/v2/core/grpc_service.proto -Writing src/generated//envoy/api/v2/core/GrpcService/GoogleGrpc/GoogleLocalCredentials.d.ts from file deps/envoy-api/envoy/api/v2/core/grpc_service.proto -Writing src/generated//envoy/api/v2/core/GrpcService/GoogleGrpc/ChannelCredentials.d.ts from file deps/envoy-api/envoy/api/v2/core/grpc_service.proto -Writing src/generated//envoy/api/v2/core/GrpcService/GoogleGrpc/CallCredentials.d.ts from file deps/envoy-api/envoy/api/v2/core/grpc_service.proto -Writing src/generated//envoy/api/v2/core/GrpcService/GoogleGrpc/CallCredentials/ServiceAccountJWTAccessCredentials.d.ts from file deps/envoy-api/envoy/api/v2/core/grpc_service.proto -Writing src/generated//envoy/api/v2/core/GrpcService/GoogleGrpc/CallCredentials/GoogleIAMCredentials.d.ts from file deps/envoy-api/envoy/api/v2/core/grpc_service.proto -Writing src/generated//envoy/api/v2/core/GrpcService/GoogleGrpc/CallCredentials/MetadataCredentialsFromPlugin.d.ts from file deps/envoy-api/envoy/api/v2/core/grpc_service.proto -Writing src/generated//envoy/api/v2/core/GrpcService/GoogleGrpc/CallCredentials/StsService.d.ts from file deps/envoy-api/envoy/api/v2/core/grpc_service.proto -Writing src/generated//envoy/api/v2/core/BackoffStrategy.d.ts from file deps/envoy-api/envoy/api/v2/core/backoff.proto -Writing src/generated//envoy/api/v2/core/HttpUri.d.ts from file deps/envoy-api/envoy/api/v2/core/http_uri.proto -Writing src/generated//envoy/api/v2/core/SocketOption.d.ts from file deps/envoy-api/envoy/api/v2/core/socket_option.proto -Writing src/generated//envoy/api/v2/core/SocketOption/SocketState.d.ts from file deps/envoy-api/envoy/api/v2/core/socket_option.proto -Writing src/generated//envoy/api/v2/core/EventServiceConfig.d.ts from file deps/envoy-api/envoy/api/v2/core/event_service_config.proto -Writing src/generated//envoy/api/v2/cluster/CircuitBreakers.d.ts from file deps/envoy-api/envoy/api/v2/cluster/circuit_breaker.proto -Writing src/generated//envoy/api/v2/cluster/CircuitBreakers/Thresholds.d.ts from file deps/envoy-api/envoy/api/v2/cluster/circuit_breaker.proto -Writing src/generated//envoy/api/v2/cluster/CircuitBreakers/Thresholds/RetryBudget.d.ts from file deps/envoy-api/envoy/api/v2/cluster/circuit_breaker.proto -Writing src/generated//envoy/api/v2/cluster/Filter.d.ts from file deps/envoy-api/envoy/api/v2/cluster/filter.proto -Writing src/generated//envoy/api/v2/cluster/OutlierDetection.d.ts from file deps/envoy-api/envoy/api/v2/cluster/outlier_detection.proto -Writing src/generated//envoy/api/v2/ClusterLoadAssignment.d.ts from file deps/envoy-api/envoy/api/v2/endpoint.proto -Writing src/generated//envoy/api/v2/ClusterLoadAssignment/Policy.d.ts from file deps/envoy-api/envoy/api/v2/endpoint.proto -Writing src/generated//envoy/api/v2/ClusterLoadAssignment/Policy/DropOverload.d.ts from file deps/envoy-api/envoy/api/v2/endpoint.proto -Writing src/generated//envoy/api/v2/endpoint/Endpoint.d.ts from file deps/envoy-api/envoy/api/v2/endpoint/endpoint_components.proto -Writing src/generated//envoy/api/v2/endpoint/Endpoint/HealthCheckConfig.d.ts from file deps/envoy-api/envoy/api/v2/endpoint/endpoint_components.proto -Writing src/generated//envoy/api/v2/endpoint/LbEndpoint.d.ts from file deps/envoy-api/envoy/api/v2/endpoint/endpoint_components.proto -Writing 
src/generated//envoy/api/v2/endpoint/LocalityLbEndpoints.d.ts from file deps/envoy-api/envoy/api/v2/endpoint/endpoint_components.proto -Writing src/generated//envoy/type/Percent.d.ts from file deps/envoy-api/envoy/type/percent.proto -Writing src/generated//envoy/type/FractionalPercent.d.ts from file deps/envoy-api/envoy/type/percent.proto -Writing src/generated//envoy/type/FractionalPercent/DenominatorType.d.ts from file deps/envoy-api/envoy/type/percent.proto -Writing src/generated//envoy/type/matcher/StringMatcher.d.ts from file deps/envoy-api/envoy/type/matcher/string.proto -Writing src/generated//envoy/type/matcher/ListStringMatcher.d.ts from file deps/envoy-api/envoy/type/matcher/string.proto -Writing src/generated//envoy/type/matcher/RegexMatcher.d.ts from file deps/envoy-api/envoy/type/matcher/regex.proto -Writing src/generated//envoy/type/matcher/RegexMatcher/GoogleRE2.d.ts from file deps/envoy-api/envoy/type/matcher/regex.proto -Writing src/generated//envoy/type/matcher/RegexMatchAndSubstitute.d.ts from file deps/envoy-api/envoy/type/matcher/regex.proto -Writing src/generated//envoy/type/CodecClientType.d.ts from file deps/envoy-api/envoy/type/http.proto -Writing src/generated//envoy/type/SemanticVersion.d.ts from file deps/envoy-api/envoy/type/semantic_version.proto -Writing src/generated//envoy/type/Int64Range.d.ts from file deps/envoy-api/envoy/type/range.proto -Writing src/generated//envoy/type/Int32Range.d.ts from file deps/envoy-api/envoy/type/range.proto -Writing src/generated//envoy/type/DoubleRange.d.ts from file deps/envoy-api/envoy/type/range.proto -Writing src/generated//udpa/annotations/MigrateAnnotation.d.ts from file deps/udpa/udpa/annotations/migrate.proto -Writing src/generated//udpa/annotations/FieldMigrateAnnotation.d.ts from file deps/udpa/udpa/annotations/migrate.proto -Writing src/generated//udpa/annotations/FileMigrateAnnotation.d.ts from file deps/udpa/udpa/annotations/migrate.proto -Writing src/generated//udpa/annotations/PackageVersionStatus.d.ts from file deps/udpa/udpa/annotations/status.proto -Writing src/generated//udpa/annotations/StatusAnnotation.d.ts from file deps/udpa/udpa/annotations/status.proto -Writing src/generated//validate/FieldRules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/FloatRules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/DoubleRules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/Int32Rules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/Int64Rules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/UInt32Rules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/UInt64Rules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/SInt32Rules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/SInt64Rules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/Fixed32Rules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/Fixed64Rules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/SFixed32Rules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/SFixed64Rules.d.ts from file 
deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/BoolRules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/StringRules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/KnownRegex.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/BytesRules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/EnumRules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/MessageRules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/RepeatedRules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/MapRules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/AnyRules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/DurationRules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/TimestampRules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//google/protobuf/Any.d.ts from file null -Writing src/generated//google/protobuf/Duration.d.ts from file null -Writing src/generated//google/protobuf/Struct.d.ts from file null -Writing src/generated//google/protobuf/Value.d.ts from file null -Writing src/generated//google/protobuf/NullValue.d.ts from file null -Writing src/generated//google/protobuf/ListValue.d.ts from file null -Writing src/generated//google/protobuf/DoubleValue.d.ts from file null -Writing src/generated//google/protobuf/FloatValue.d.ts from file null -Writing src/generated//google/protobuf/Int64Value.d.ts from file null -Writing src/generated//google/protobuf/UInt64Value.d.ts from file null -Writing src/generated//google/protobuf/Int32Value.d.ts from file null -Writing src/generated//google/protobuf/UInt32Value.d.ts from file null -Writing src/generated//google/protobuf/BoolValue.d.ts from file null -Writing src/generated//google/protobuf/StringValue.d.ts from file null -Writing src/generated//google/protobuf/BytesValue.d.ts from file null -Writing src/generated//google/protobuf/Timestamp.d.ts from file null -Writing src/generated//google/protobuf/Empty.d.ts from file null -Writing src/generated//google/protobuf/FileDescriptorSet.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/FileDescriptorProto.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/DescriptorProto.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/DescriptorProto/ExtensionRange.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/DescriptorProto/ReservedRange.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/FieldDescriptorProto.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/FieldDescriptorProto/Type.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/FieldDescriptorProto/Label.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing 
src/generated//google/protobuf/OneofDescriptorProto.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/EnumDescriptorProto.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/EnumValueDescriptorProto.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/ServiceDescriptorProto.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/MethodDescriptorProto.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/FileOptions.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/FileOptions/OptimizeMode.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/MessageOptions.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/FieldOptions.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/FieldOptions/CType.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/FieldOptions/JSType.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/OneofOptions.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/EnumOptions.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/EnumValueOptions.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/ServiceOptions.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/MethodOptions.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/UninterpretedOption.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/UninterpretedOption/NamePart.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/SourceCodeInfo.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/SourceCodeInfo/Location.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/GeneratedCodeInfo.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/GeneratedCodeInfo/Annotation.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/api/Http.d.ts from file node_modules/protobufjs/google/api/http.proto -Writing src/generated//google/api/HttpRule.d.ts from file node_modules/protobufjs/google/api/http.proto -Writing src/generated//google/api/CustomHttpPattern.d.ts from file node_modules/protobufjs/google/api/http.proto -Processing envoy/api/v2/endpoint.proto -Writing src/generated//endpoint.d.ts -Writing src/generated//envoy/api/v2/ClusterLoadAssignment.d.ts from file deps/envoy-api/envoy/api/v2/endpoint.proto -Writing src/generated//envoy/api/v2/ClusterLoadAssignment/Policy.d.ts from file deps/envoy-api/envoy/api/v2/endpoint.proto -Writing 
src/generated//envoy/api/v2/ClusterLoadAssignment/Policy/DropOverload.d.ts from file deps/envoy-api/envoy/api/v2/endpoint.proto -Writing src/generated//envoy/api/v2/endpoint/Endpoint.d.ts from file deps/envoy-api/envoy/api/v2/endpoint/endpoint_components.proto -Writing src/generated//envoy/api/v2/endpoint/Endpoint/HealthCheckConfig.d.ts from file deps/envoy-api/envoy/api/v2/endpoint/endpoint_components.proto -Writing src/generated//envoy/api/v2/endpoint/LbEndpoint.d.ts from file deps/envoy-api/envoy/api/v2/endpoint/endpoint_components.proto -Writing src/generated//envoy/api/v2/endpoint/LocalityLbEndpoints.d.ts from file deps/envoy-api/envoy/api/v2/endpoint/endpoint_components.proto -Writing src/generated//envoy/api/v2/core/RoutingPriority.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/RequestMethod.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/TrafficDirection.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/Locality.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/BuildVersion.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/Extension.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/Node.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/Metadata.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/RuntimeUInt32.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/RuntimeDouble.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/RuntimeFeatureFlag.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/HeaderValue.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/HeaderValueOption.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/HeaderMap.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/DataSource.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/RetryPolicy.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/RemoteDataSource.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/AsyncDataSource.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/TransportSocket.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/RuntimeFractionalPercent.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/ControlPlane.d.ts from file deps/envoy-api/envoy/api/v2/core/base.proto -Writing src/generated//envoy/api/v2/core/Pipe.d.ts from file deps/envoy-api/envoy/api/v2/core/address.proto -Writing src/generated//envoy/api/v2/core/SocketAddress.d.ts from file deps/envoy-api/envoy/api/v2/core/address.proto -Writing src/generated//envoy/api/v2/core/SocketAddress/Protocol.d.ts from file deps/envoy-api/envoy/api/v2/core/address.proto -Writing src/generated//envoy/api/v2/core/TcpKeepalive.d.ts from file 
deps/envoy-api/envoy/api/v2/core/address.proto -Writing src/generated//envoy/api/v2/core/BindConfig.d.ts from file deps/envoy-api/envoy/api/v2/core/address.proto -Writing src/generated//envoy/api/v2/core/Address.d.ts from file deps/envoy-api/envoy/api/v2/core/address.proto -Writing src/generated//envoy/api/v2/core/CidrRange.d.ts from file deps/envoy-api/envoy/api/v2/core/address.proto -Writing src/generated//envoy/api/v2/core/HealthStatus.d.ts from file deps/envoy-api/envoy/api/v2/core/health_check.proto -Writing src/generated//envoy/api/v2/core/HealthCheck.d.ts from file deps/envoy-api/envoy/api/v2/core/health_check.proto -Writing src/generated//envoy/api/v2/core/HealthCheck/Payload.d.ts from file deps/envoy-api/envoy/api/v2/core/health_check.proto -Writing src/generated//envoy/api/v2/core/HealthCheck/HttpHealthCheck.d.ts from file deps/envoy-api/envoy/api/v2/core/health_check.proto -Writing src/generated//envoy/api/v2/core/HealthCheck/TcpHealthCheck.d.ts from file deps/envoy-api/envoy/api/v2/core/health_check.proto -Writing src/generated//envoy/api/v2/core/HealthCheck/RedisHealthCheck.d.ts from file deps/envoy-api/envoy/api/v2/core/health_check.proto -Writing src/generated//envoy/api/v2/core/HealthCheck/GrpcHealthCheck.d.ts from file deps/envoy-api/envoy/api/v2/core/health_check.proto -Writing src/generated//envoy/api/v2/core/HealthCheck/CustomHealthCheck.d.ts from file deps/envoy-api/envoy/api/v2/core/health_check.proto -Writing src/generated//envoy/api/v2/core/HealthCheck/TlsOptions.d.ts from file deps/envoy-api/envoy/api/v2/core/health_check.proto -Writing src/generated//envoy/api/v2/core/BackoffStrategy.d.ts from file deps/envoy-api/envoy/api/v2/core/backoff.proto -Writing src/generated//envoy/api/v2/core/HttpUri.d.ts from file deps/envoy-api/envoy/api/v2/core/http_uri.proto -Writing src/generated//envoy/api/v2/core/SocketOption.d.ts from file deps/envoy-api/envoy/api/v2/core/socket_option.proto -Writing src/generated//envoy/api/v2/core/SocketOption/SocketState.d.ts from file deps/envoy-api/envoy/api/v2/core/socket_option.proto -Writing src/generated//envoy/api/v2/core/EventServiceConfig.d.ts from file deps/envoy-api/envoy/api/v2/core/event_service_config.proto -Writing src/generated//envoy/api/v2/core/GrpcService.d.ts from file deps/envoy-api/envoy/api/v2/core/grpc_service.proto -Writing src/generated//envoy/api/v2/core/GrpcService/EnvoyGrpc.d.ts from file deps/envoy-api/envoy/api/v2/core/grpc_service.proto -Writing src/generated//envoy/api/v2/core/GrpcService/GoogleGrpc.d.ts from file deps/envoy-api/envoy/api/v2/core/grpc_service.proto -Writing src/generated//envoy/api/v2/core/GrpcService/GoogleGrpc/SslCredentials.d.ts from file deps/envoy-api/envoy/api/v2/core/grpc_service.proto -Writing src/generated//envoy/api/v2/core/GrpcService/GoogleGrpc/GoogleLocalCredentials.d.ts from file deps/envoy-api/envoy/api/v2/core/grpc_service.proto -Writing src/generated//envoy/api/v2/core/GrpcService/GoogleGrpc/ChannelCredentials.d.ts from file deps/envoy-api/envoy/api/v2/core/grpc_service.proto -Writing src/generated//envoy/api/v2/core/GrpcService/GoogleGrpc/CallCredentials.d.ts from file deps/envoy-api/envoy/api/v2/core/grpc_service.proto -Writing src/generated//envoy/api/v2/core/GrpcService/GoogleGrpc/CallCredentials/ServiceAccountJWTAccessCredentials.d.ts from file deps/envoy-api/envoy/api/v2/core/grpc_service.proto -Writing src/generated//envoy/api/v2/core/GrpcService/GoogleGrpc/CallCredentials/GoogleIAMCredentials.d.ts from file deps/envoy-api/envoy/api/v2/core/grpc_service.proto -Writing 
src/generated//envoy/api/v2/core/GrpcService/GoogleGrpc/CallCredentials/MetadataCredentialsFromPlugin.d.ts from file deps/envoy-api/envoy/api/v2/core/grpc_service.proto -Writing src/generated//envoy/api/v2/core/GrpcService/GoogleGrpc/CallCredentials/StsService.d.ts from file deps/envoy-api/envoy/api/v2/core/grpc_service.proto -Writing src/generated//envoy/type/Percent.d.ts from file deps/envoy-api/envoy/type/percent.proto -Writing src/generated//envoy/type/FractionalPercent.d.ts from file deps/envoy-api/envoy/type/percent.proto -Writing src/generated//envoy/type/FractionalPercent/DenominatorType.d.ts from file deps/envoy-api/envoy/type/percent.proto -Writing src/generated//envoy/type/matcher/StringMatcher.d.ts from file deps/envoy-api/envoy/type/matcher/string.proto -Writing src/generated//envoy/type/matcher/ListStringMatcher.d.ts from file deps/envoy-api/envoy/type/matcher/string.proto -Writing src/generated//envoy/type/matcher/RegexMatcher.d.ts from file deps/envoy-api/envoy/type/matcher/regex.proto -Writing src/generated//envoy/type/matcher/RegexMatcher/GoogleRE2.d.ts from file deps/envoy-api/envoy/type/matcher/regex.proto -Writing src/generated//envoy/type/matcher/RegexMatchAndSubstitute.d.ts from file deps/envoy-api/envoy/type/matcher/regex.proto -Writing src/generated//envoy/type/SemanticVersion.d.ts from file deps/envoy-api/envoy/type/semantic_version.proto -Writing src/generated//envoy/type/CodecClientType.d.ts from file deps/envoy-api/envoy/type/http.proto -Writing src/generated//envoy/type/Int64Range.d.ts from file deps/envoy-api/envoy/type/range.proto -Writing src/generated//envoy/type/Int32Range.d.ts from file deps/envoy-api/envoy/type/range.proto -Writing src/generated//envoy/type/DoubleRange.d.ts from file deps/envoy-api/envoy/type/range.proto -Writing src/generated//udpa/annotations/MigrateAnnotation.d.ts from file deps/udpa/udpa/annotations/migrate.proto -Writing src/generated//udpa/annotations/FieldMigrateAnnotation.d.ts from file deps/udpa/udpa/annotations/migrate.proto -Writing src/generated//udpa/annotations/FileMigrateAnnotation.d.ts from file deps/udpa/udpa/annotations/migrate.proto -Writing src/generated//udpa/annotations/PackageVersionStatus.d.ts from file deps/udpa/udpa/annotations/status.proto -Writing src/generated//udpa/annotations/StatusAnnotation.d.ts from file deps/udpa/udpa/annotations/status.proto -Writing src/generated//validate/FieldRules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/FloatRules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/DoubleRules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/Int32Rules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/Int64Rules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/UInt32Rules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/UInt64Rules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/SInt32Rules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/SInt64Rules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/Fixed32Rules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/Fixed64Rules.d.ts from file 
deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/SFixed32Rules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/SFixed64Rules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/BoolRules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/StringRules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/KnownRegex.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/BytesRules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/EnumRules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/MessageRules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/RepeatedRules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/MapRules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/AnyRules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/DurationRules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//validate/TimestampRules.d.ts from file deps/protoc-gen-validate/validate/validate.proto -Writing src/generated//google/protobuf/Duration.d.ts from file null -Writing src/generated//google/protobuf/DoubleValue.d.ts from file null -Writing src/generated//google/protobuf/FloatValue.d.ts from file null -Writing src/generated//google/protobuf/Int64Value.d.ts from file null -Writing src/generated//google/protobuf/UInt64Value.d.ts from file null -Writing src/generated//google/protobuf/Int32Value.d.ts from file null -Writing src/generated//google/protobuf/UInt32Value.d.ts from file null -Writing src/generated//google/protobuf/BoolValue.d.ts from file null -Writing src/generated//google/protobuf/StringValue.d.ts from file null -Writing src/generated//google/protobuf/BytesValue.d.ts from file null -Writing src/generated//google/protobuf/Timestamp.d.ts from file null -Writing src/generated//google/protobuf/FileDescriptorSet.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/FileDescriptorProto.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/DescriptorProto.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/DescriptorProto/ExtensionRange.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/DescriptorProto/ReservedRange.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/FieldDescriptorProto.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/FieldDescriptorProto/Type.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/FieldDescriptorProto/Label.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/OneofDescriptorProto.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/EnumDescriptorProto.d.ts from file 
node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/EnumValueDescriptorProto.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/ServiceDescriptorProto.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/MethodDescriptorProto.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/FileOptions.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/FileOptions/OptimizeMode.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/MessageOptions.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/FieldOptions.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/FieldOptions/CType.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/FieldOptions/JSType.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/OneofOptions.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/EnumOptions.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/EnumValueOptions.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/ServiceOptions.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/MethodOptions.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/UninterpretedOption.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/UninterpretedOption/NamePart.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/SourceCodeInfo.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/SourceCodeInfo/Location.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/GeneratedCodeInfo.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/GeneratedCodeInfo/Annotation.d.ts from file node_modules/protobufjs/google/protobuf/descriptor.proto -Writing src/generated//google/protobuf/Any.d.ts from file null -Writing src/generated//google/protobuf/Struct.d.ts from file null -Writing src/generated//google/protobuf/Value.d.ts from file null -Writing src/generated//google/protobuf/NullValue.d.ts from file null -Writing src/generated//google/protobuf/ListValue.d.ts from file null -Writing src/generated//google/protobuf/Empty.d.ts from file null -Writing src/generated//google/api/Http.d.ts from file node_modules/protobufjs/google/api/http.proto -Writing src/generated//google/api/HttpRule.d.ts from file node_modules/protobufjs/google/api/http.proto -Writing src/generated//google/api/CustomHttpPattern.d.ts from file node_modules/protobufjs/google/api/http.proto -Success diff --git a/packages/grpc-js/package.json b/packages/grpc-js/package.json index f0e20ff10..73b63bf7b 100644 --- a/packages/grpc-js/package.json 
+++ b/packages/grpc-js/package.json @@ -1,12 +1,12 @@ { "name": "@grpc/grpc-js", - "version": "1.3.7", + "version": "1.10.9", "description": "gRPC Library for Node - pure JS implementation", "homepage": "https://grpc.io/", "repository": "https://github.com/grpc/grpc-node/tree/master/packages/grpc-js", "main": "build/src/index.js", "engines": { - "node": "^8.13.0 || >=10.10.0" + "node": ">=12.10.0" }, "keywords": [], "author": { @@ -15,27 +15,35 @@ "types": "build/src/index.d.ts", "license": "Apache-2.0", "devDependencies": { - "@grpc/proto-loader": "^0.5.5", - "@types/gulp": "^4.0.6", - "@types/gulp-mocha": "0.0.32", - "@types/lodash": "^4.14.108", - "@types/mocha": "^5.2.6", - "@types/ncp": "^2.0.1", - "@types/pify": "^3.0.2", - "@types/yargs": "^15.0.5", - "clang-format": "^1.0.55", + "@types/gulp": "^4.0.17", + "@types/gulp-mocha": "0.0.37", + "@types/lodash": "^4.14.202", + "@types/mocha": "^10.0.6", + "@types/ncp": "^2.0.8", + "@types/node": ">=20.11.20", + "@types/pify": "^5.0.4", + "@types/semver": "^7.5.8", + "@typescript-eslint/eslint-plugin": "^7.1.0", + "@typescript-eslint/parser": "^7.1.0", + "@typescript-eslint/typescript-estree": "^7.1.0", + "clang-format": "^1.8.0", + "eslint": "^8.42.0", + "eslint-config-prettier": "^8.8.0", + "eslint-plugin-node": "^11.1.0", + "eslint-plugin-prettier": "^4.2.1", "execa": "^2.0.3", - "gts": "^2.0.0", "gulp": "^4.0.2", "gulp-mocha": "^6.0.0", - "lodash": "^4.17.4", + "lodash": "^4.17.21", + "madge": "^5.0.1", "mocha-jenkins-reporter": "^0.4.1", "ncp": "^2.0.0", "pify": "^4.0.1", + "prettier": "^2.8.8", "rimraf": "^3.0.2", - "ts-node": "^8.3.0", - "typescript": "^3.7.2", - "yargs": "^15.4.1" + "semver": "^7.6.0", + "ts-node": "^10.9.2", + "typescript": "^5.3.3" }, "contributors": [ { @@ -44,23 +52,27 @@ ], "scripts": { "build": "npm run compile", - "clean": "node -e 'require(\"rimraf\")(\"./build\", () => {})'", + "clean": "rimraf ./build", "compile": "tsc -p .", "format": "clang-format -i -style=\"{Language: JavaScript, BasedOnStyle: Google, ColumnLimit: 80}\" src/*.ts test/*.ts", - "lint": "npm run check", - "prepare": "npm run compile", + "lint": "eslint src/*.ts test/*.ts", + "prepare": "npm run generate-types && npm run compile", "test": "gulp test", - "check": "gts check src/**/*.ts", - "fix": "gts fix src/*.ts", - "pretest": "npm run compile", - "posttest": "npm run check" + "check": "npm run lint", + "fix": "eslint --fix src/*.ts test/*.ts", + "pretest": "npm run generate-types && npm run generate-test-types && npm run compile", + "posttest": "npm run check && madge -c ./build/src", + "generate-types": "proto-loader-gen-types --keepCase --longs String --enums String --defaults --oneofs --includeComments --includeDirs proto/ --include-dirs test/fixtures/ -O src/generated/ --grpcLib ../index channelz.proto", + "generate-test-types": "proto-loader-gen-types --keepCase --longs String --enums String --defaults --oneofs --includeComments --include-dirs test/fixtures/ -O test/generated/ --grpcLib ../../src/index test_service.proto" }, "dependencies": { - "@types/node": ">=12.12.47" + "@grpc/proto-loader": "^0.7.13", + "@js-sdsl/ordered-map": "^4.4.2" }, "files": [ "src/**/*.ts", - "build/src/*.{js,d.ts,js.map}", + "build/src/**/*.{js,d.ts,js.map}", + "proto/*.proto", "LICENSE", "deps/envoy-api/envoy/api/v2/**/*.proto", "deps/envoy-api/envoy/config/**/*.proto", diff --git a/packages/grpc-js/prettier.config.js b/packages/grpc-js/prettier.config.js index 92747c8cc..ecd1f4854 100644 --- a/packages/grpc-js/prettier.config.js +++ 
b/packages/grpc-js/prettier.config.js @@ -2,4 +2,6 @@ module.exports = { proseWrap: 'always', singleQuote: true, trailingComma: 'es5', + bracketSpacing: true, + arrowParens: 'avoid', }; diff --git a/packages/grpc-js/proto/channelz.proto b/packages/grpc-js/proto/channelz.proto new file mode 100644 index 000000000..446e9794b --- /dev/null +++ b/packages/grpc-js/proto/channelz.proto @@ -0,0 +1,564 @@ +// Copyright 2018 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file defines an interface for exporting monitoring information +// out of gRPC servers. See the full design at +// https://github.com/grpc/proposal/blob/master/A14-channelz.md +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/channelz/v1/channelz.proto + +syntax = "proto3"; + +package grpc.channelz.v1; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; + +option go_package = "google.golang.org/grpc/channelz/grpc_channelz_v1"; +option java_multiple_files = true; +option java_package = "io.grpc.channelz.v1"; +option java_outer_classname = "ChannelzProto"; + +// Channel is a logical grouping of channels, subchannels, and sockets. +message Channel { + // The identifier for this channel. This should bet set. + ChannelRef ref = 1; + // Data specific to this channel. + ChannelData data = 2; + // At most one of 'channel_ref+subchannel_ref' and 'socket' is set. + + // There are no ordering guarantees on the order of channel refs. + // There may not be cycles in the ref graph. + // A channel ref may be present in more than one channel or subchannel. + repeated ChannelRef channel_ref = 3; + + // At most one of 'channel_ref+subchannel_ref' and 'socket' is set. + // There are no ordering guarantees on the order of subchannel refs. + // There may not be cycles in the ref graph. + // A sub channel ref may be present in more than one channel or subchannel. + repeated SubchannelRef subchannel_ref = 4; + + // There are no ordering guarantees on the order of sockets. + repeated SocketRef socket_ref = 5; +} + +// Subchannel is a logical grouping of channels, subchannels, and sockets. +// A subchannel is load balanced over by it's ancestor +message Subchannel { + // The identifier for this channel. + SubchannelRef ref = 1; + // Data specific to this channel. + ChannelData data = 2; + // At most one of 'channel_ref+subchannel_ref' and 'socket' is set. + + // There are no ordering guarantees on the order of channel refs. + // There may not be cycles in the ref graph. + // A channel ref may be present in more than one channel or subchannel. + repeated ChannelRef channel_ref = 3; + + // At most one of 'channel_ref+subchannel_ref' and 'socket' is set. + // There are no ordering guarantees on the order of subchannel refs. + // There may not be cycles in the ref graph. + // A sub channel ref may be present in more than one channel or subchannel. 
+ repeated SubchannelRef subchannel_ref = 4; + + // There are no ordering guarantees on the order of sockets. + repeated SocketRef socket_ref = 5; +} + +// These come from the specified states in this document: +// https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md +message ChannelConnectivityState { + enum State { + UNKNOWN = 0; + IDLE = 1; + CONNECTING = 2; + READY = 3; + TRANSIENT_FAILURE = 4; + SHUTDOWN = 5; + } + State state = 1; +} + +// Channel data is data related to a specific Channel or Subchannel. +message ChannelData { + // The connectivity state of the channel or subchannel. Implementations + // should always set this. + ChannelConnectivityState state = 1; + + // The target this channel originally tried to connect to. May be absent + string target = 2; + + // A trace of recent events on the channel. May be absent. + ChannelTrace trace = 3; + + // The number of calls started on the channel + int64 calls_started = 4; + // The number of calls that have completed with an OK status + int64 calls_succeeded = 5; + // The number of calls that have completed with a non-OK status + int64 calls_failed = 6; + + // The last time a call was started on the channel. + google.protobuf.Timestamp last_call_started_timestamp = 7; +} + +// A trace event is an interesting thing that happened to a channel or +// subchannel, such as creation, address resolution, subchannel creation, etc. +message ChannelTraceEvent { + // High level description of the event. + string description = 1; + // The supported severity levels of trace events. + enum Severity { + CT_UNKNOWN = 0; + CT_INFO = 1; + CT_WARNING = 2; + CT_ERROR = 3; + } + // the severity of the trace event + Severity severity = 2; + // When this event occurred. + google.protobuf.Timestamp timestamp = 3; + // ref of referenced channel or subchannel. + // Optional, only present if this event refers to a child object. For example, + // this field would be filled if this trace event was for a subchannel being + // created. + oneof child_ref { + ChannelRef channel_ref = 4; + SubchannelRef subchannel_ref = 5; + } +} + +// ChannelTrace represents the recent events that have occurred on the channel. +message ChannelTrace { + // Number of events ever logged in this tracing object. This can differ from + // events.size() because events can be overwritten or garbage collected by + // implementations. + int64 num_events_logged = 1; + // Time that this channel was created. + google.protobuf.Timestamp creation_timestamp = 2; + // List of events that have occurred on this channel. + repeated ChannelTraceEvent events = 3; +} + +// ChannelRef is a reference to a Channel. +message ChannelRef { + // The globally unique id for this channel. Must be a positive number. + int64 channel_id = 1; + // An optional name associated with the channel. + string name = 2; + // Intentionally don't use field numbers from other refs. + reserved 3, 4, 5, 6, 7, 8; +} + +// SubchannelRef is a reference to a Subchannel. +message SubchannelRef { + // The globally unique id for this subchannel. Must be a positive number. + int64 subchannel_id = 7; + // An optional name associated with the subchannel. + string name = 8; + // Intentionally don't use field numbers from other refs. + reserved 1, 2, 3, 4, 5, 6; +} + +// SocketRef is a reference to a Socket. +message SocketRef { + // The globally unique id for this socket. Must be a positive number. + int64 socket_id = 3; + // An optional name associated with the socket. 
+ string name = 4; + // Intentionally don't use field numbers from other refs. + reserved 1, 2, 5, 6, 7, 8; +} + +// ServerRef is a reference to a Server. +message ServerRef { + // A globally unique identifier for this server. Must be a positive number. + int64 server_id = 5; + // An optional name associated with the server. + string name = 6; + // Intentionally don't use field numbers from other refs. + reserved 1, 2, 3, 4, 7, 8; +} + +// Server represents a single server. There may be multiple servers in a single +// program. +message Server { + // The identifier for a Server. This should be set. + ServerRef ref = 1; + // The associated data of the Server. + ServerData data = 2; + + // The sockets that the server is listening on. There are no ordering + // guarantees. This may be absent. + repeated SocketRef listen_socket = 3; +} + +// ServerData is data for a specific Server. +message ServerData { + // A trace of recent events on the server. May be absent. + ChannelTrace trace = 1; + + // The number of incoming calls started on the server + int64 calls_started = 2; + // The number of incoming calls that have completed with an OK status + int64 calls_succeeded = 3; + // The number of incoming calls that have a completed with a non-OK status + int64 calls_failed = 4; + + // The last time a call was started on the server. + google.protobuf.Timestamp last_call_started_timestamp = 5; +} + +// Information about an actual connection. Pronounced "sock-ay". +message Socket { + // The identifier for the Socket. + SocketRef ref = 1; + + // Data specific to this Socket. + SocketData data = 2; + // The locally bound address. + Address local = 3; + // The remote bound address. May be absent. + Address remote = 4; + // Security details for this socket. May be absent if not available, or + // there is no security on the socket. + Security security = 5; + + // Optional, represents the name of the remote endpoint, if different than + // the original target name. + string remote_name = 6; +} + +// SocketData is data associated for a specific Socket. The fields present +// are specific to the implementation, so there may be minor differences in +// the semantics. (e.g. flow control windows) +message SocketData { + // The number of streams that have been started. + int64 streams_started = 1; + // The number of streams that have ended successfully: + // On client side, received frame with eos bit set; + // On server side, sent frame with eos bit set. + int64 streams_succeeded = 2; + // The number of streams that have ended unsuccessfully: + // On client side, ended without receiving frame with eos bit set; + // On server side, ended without sending frame with eos bit set. + int64 streams_failed = 3; + // The number of grpc messages successfully sent on this socket. + int64 messages_sent = 4; + // The number of grpc messages received on this socket. + int64 messages_received = 5; + + // The number of keep alives sent. This is typically implemented with HTTP/2 + // ping messages. + int64 keep_alives_sent = 6; + + // The last time a stream was created by this endpoint. Usually unset for + // servers. + google.protobuf.Timestamp last_local_stream_created_timestamp = 7; + // The last time a stream was created by the remote endpoint. Usually unset + // for clients. + google.protobuf.Timestamp last_remote_stream_created_timestamp = 8; + + // The last time a message was sent by this endpoint. + google.protobuf.Timestamp last_message_sent_timestamp = 9; + // The last time a message was received by this endpoint. 
+ google.protobuf.Timestamp last_message_received_timestamp = 10; + + // The amount of window, granted to the local endpoint by the remote endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + google.protobuf.Int64Value local_flow_control_window = 11; + + // The amount of window, granted to the remote endpoint by the local endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + google.protobuf.Int64Value remote_flow_control_window = 12; + + // Socket options set on this socket. May be absent if 'summary' is set + // on GetSocketRequest. + repeated SocketOption option = 13; +} + +// Address represents the address used to create the socket. +message Address { + message TcpIpAddress { + // Either the IPv4 or IPv6 address in bytes. Will be either 4 bytes or 16 + // bytes in length. + bytes ip_address = 1; + // 0-64k, or -1 if not appropriate. + int32 port = 2; + } + // A Unix Domain Socket address. + message UdsAddress { + string filename = 1; + } + // An address type not included above. + message OtherAddress { + // The human readable version of the value. This value should be set. + string name = 1; + // The actual address message. + google.protobuf.Any value = 2; + } + + oneof address { + TcpIpAddress tcpip_address = 1; + UdsAddress uds_address = 2; + OtherAddress other_address = 3; + } +} + +// Security represents details about how secure the socket is. +message Security { + message Tls { + oneof cipher_suite { + // The cipher suite name in the RFC 4346 format: + // https://tools.ietf.org/html/rfc4346#appendix-C + string standard_name = 1; + // Some other way to describe the cipher suite if + // the RFC 4346 name is not available. + string other_name = 2; + } + // the certificate used by this endpoint. + bytes local_certificate = 3; + // the certificate used by the remote endpoint. + bytes remote_certificate = 4; + } + message OtherSecurity { + // The human readable version of the value. + string name = 1; + // The actual security details message. + google.protobuf.Any value = 2; + } + oneof model { + Tls tls = 1; + OtherSecurity other = 2; + } +} + +// SocketOption represents socket options for a socket. Specifically, these +// are the options returned by getsockopt(). +message SocketOption { + // The full name of the socket option. Typically this will be the upper case + // name, such as "SO_REUSEPORT". + string name = 1; + // The human readable value of this socket option. At least one of value or + // additional will be set. + string value = 2; + // Additional data associated with the socket option. At least one of value + // or additional will be set. + google.protobuf.Any additional = 3; +} + +// For use with SocketOption's additional field. This is primarily used for +// SO_RCVTIMEO and SO_SNDTIMEO +message SocketOptionTimeout { + google.protobuf.Duration duration = 1; +} + +// For use with SocketOption's additional field. This is primarily used for +// SO_LINGER. +message SocketOptionLinger { + // active maps to `struct linger.l_onoff` + bool active = 1; + // duration maps to `struct linger.l_linger` + google.protobuf.Duration duration = 2; +} + +// For use with SocketOption's additional field. Tcp info for +// SOL_TCP and TCP_INFO. 
+message SocketOptionTcpInfo { + uint32 tcpi_state = 1; + + uint32 tcpi_ca_state = 2; + uint32 tcpi_retransmits = 3; + uint32 tcpi_probes = 4; + uint32 tcpi_backoff = 5; + uint32 tcpi_options = 6; + uint32 tcpi_snd_wscale = 7; + uint32 tcpi_rcv_wscale = 8; + + uint32 tcpi_rto = 9; + uint32 tcpi_ato = 10; + uint32 tcpi_snd_mss = 11; + uint32 tcpi_rcv_mss = 12; + + uint32 tcpi_unacked = 13; + uint32 tcpi_sacked = 14; + uint32 tcpi_lost = 15; + uint32 tcpi_retrans = 16; + uint32 tcpi_fackets = 17; + + uint32 tcpi_last_data_sent = 18; + uint32 tcpi_last_ack_sent = 19; + uint32 tcpi_last_data_recv = 20; + uint32 tcpi_last_ack_recv = 21; + + uint32 tcpi_pmtu = 22; + uint32 tcpi_rcv_ssthresh = 23; + uint32 tcpi_rtt = 24; + uint32 tcpi_rttvar = 25; + uint32 tcpi_snd_ssthresh = 26; + uint32 tcpi_snd_cwnd = 27; + uint32 tcpi_advmss = 28; + uint32 tcpi_reordering = 29; +} + +// Channelz is a service exposed by gRPC servers that provides detailed debug +// information. +service Channelz { + // Gets all root channels (i.e. channels the application has directly + // created). This does not include subchannels nor non-top level channels. + rpc GetTopChannels(GetTopChannelsRequest) returns (GetTopChannelsResponse); + // Gets all servers that exist in the process. + rpc GetServers(GetServersRequest) returns (GetServersResponse); + // Returns a single Server, or else a NOT_FOUND code. + rpc GetServer(GetServerRequest) returns (GetServerResponse); + // Gets all server sockets that exist in the process. + rpc GetServerSockets(GetServerSocketsRequest) returns (GetServerSocketsResponse); + // Returns a single Channel, or else a NOT_FOUND code. + rpc GetChannel(GetChannelRequest) returns (GetChannelResponse); + // Returns a single Subchannel, or else a NOT_FOUND code. + rpc GetSubchannel(GetSubchannelRequest) returns (GetSubchannelResponse); + // Returns a single Socket or else a NOT_FOUND code. + rpc GetSocket(GetSocketRequest) returns (GetSocketResponse); +} + +message GetTopChannelsRequest { + // start_channel_id indicates that only channels at or above this id should be + // included in the results. + // To request the first page, this should be set to 0. To request + // subsequent pages, the client generates this value by adding 1 to + // the highest seen result ID. + int64 start_channel_id = 1; + + // If non-zero, the server will return a page of results containing + // at most this many items. If zero, the server will choose a + // reasonable page size. Must never be negative. + int64 max_results = 2; +} + +message GetTopChannelsResponse { + // list of channels that the connection detail service knows about. Sorted in + // ascending channel_id order. + // Must contain at least 1 result, otherwise 'end' must be true. + repeated Channel channel = 1; + // If set, indicates that the list of channels is the final list. Requesting + // more channels can only return more if they are created after this RPC + // completes. + bool end = 2; +} + +message GetServersRequest { + // start_server_id indicates that only servers at or above this id should be + // included in the results. + // To request the first page, this must be set to 0. To request + // subsequent pages, the client generates this value by adding 1 to + // the highest seen result ID. + int64 start_server_id = 1; + + // If non-zero, the server will return a page of results containing + // at most this many items. If zero, the server will choose a + // reasonable page size. Must never be negative. 
+ int64 max_results = 2; +} + +message GetServersResponse { + // list of servers that the connection detail service knows about. Sorted in + // ascending server_id order. + // Must contain at least 1 result, otherwise 'end' must be true. + repeated Server server = 1; + // If set, indicates that the list of servers is the final list. Requesting + // more servers will only return more if they are created after this RPC + // completes. + bool end = 2; +} + +message GetServerRequest { + // server_id is the identifier of the specific server to get. + int64 server_id = 1; +} + +message GetServerResponse { + // The Server that corresponds to the requested server_id. This field + // should be set. + Server server = 1; +} + +message GetServerSocketsRequest { + int64 server_id = 1; + // start_socket_id indicates that only sockets at or above this id should be + // included in the results. + // To request the first page, this must be set to 0. To request + // subsequent pages, the client generates this value by adding 1 to + // the highest seen result ID. + int64 start_socket_id = 2; + + // If non-zero, the server will return a page of results containing + // at most this many items. If zero, the server will choose a + // reasonable page size. Must never be negative. + int64 max_results = 3; +} + +message GetServerSocketsResponse { + // list of socket refs that the connection detail service knows about. Sorted in + // ascending socket_id order. + // Must contain at least 1 result, otherwise 'end' must be true. + repeated SocketRef socket_ref = 1; + // If set, indicates that the list of sockets is the final list. Requesting + // more sockets will only return more if they are created after this RPC + // completes. + bool end = 2; +} + +message GetChannelRequest { + // channel_id is the identifier of the specific channel to get. + int64 channel_id = 1; +} + +message GetChannelResponse { + // The Channel that corresponds to the requested channel_id. This field + // should be set. + Channel channel = 1; +} + +message GetSubchannelRequest { + // subchannel_id is the identifier of the specific subchannel to get. + int64 subchannel_id = 1; +} + +message GetSubchannelResponse { + // The Subchannel that corresponds to the requested subchannel_id. This + // field should be set. + Subchannel subchannel = 1; +} + +message GetSocketRequest { + // socket_id is the identifier of the specific socket to get. + int64 socket_id = 1; + + // If true, the response will contain only high level information + // that is inexpensive to obtain. Fields thay may be omitted are + // documented. + bool summary = 2; +} + +message GetSocketResponse { + // The Socket that corresponds to the requested socket_id. This field + // should be set. + Socket socket = 1; +} \ No newline at end of file diff --git a/packages/grpc-js/src/admin.ts b/packages/grpc-js/src/admin.ts new file mode 100644 index 000000000..4d26b89bd --- /dev/null +++ b/packages/grpc-js/src/admin.ts @@ -0,0 +1,45 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
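The channelz.proto additions end here. As a usage illustration, the sketch below pages through Channelz.GetTopChannels following the pagination contract described in the comments above (start at 0, then request 1 plus the highest channel_id seen, stop when `end` is set). The local proto path, the target address, and the conventional grpc.channelz.v1 package name are assumptions for illustration, not part of this diff.

```ts
// Sketch only: paging through Channelz.GetTopChannels. The proto path, the
// target address, and the package name are assumptions for illustration.
import * as grpc from '@grpc/grpc-js';
import * as protoLoader from '@grpc/proto-loader';

const packageDefinition = protoLoader.loadSync('channelz.proto', {
  keepCase: true,
  longs: String,
  defaults: true,
});
// Indexing into an untyped loaded package requires a cast.
const loaded = grpc.loadPackageDefinition(packageDefinition) as any;
const client = new loaded.grpc.channelz.v1.Channelz(
  'localhost:50051',
  grpc.credentials.createInsecure()
);

function listTopChannels(startId: number, found: any[]): void {
  client.GetTopChannels(
    { start_channel_id: startId, max_results: 100 },
    (error: grpc.ServiceError | null, response: any) => {
      if (error) {
        console.error(`GetTopChannels failed: ${error.message}`);
        return;
      }
      found.push(...response.channel);
      if (response.end || response.channel.length === 0) {
        console.log(`Found ${found.length} top-level channels`);
      } else {
        // Results are sorted by channel_id, so the next page starts at
        // 1 + the highest id seen so far.
        const last = response.channel[response.channel.length - 1];
        listTopChannels(Number(last.ref.channel_id) + 1, found);
      }
    }
  );
}

listTopChannels(0, []);
```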
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import { ServiceDefinition } from './make-client'; +import { Server, UntypedServiceImplementation } from './server'; + +interface GetServiceDefinition { + (): ServiceDefinition; +} + +interface GetHandlers { + (): UntypedServiceImplementation; +} + +const registeredAdminServices: { + getServiceDefinition: GetServiceDefinition; + getHandlers: GetHandlers; +}[] = []; + +export function registerAdminService( + getServiceDefinition: GetServiceDefinition, + getHandlers: GetHandlers +) { + registeredAdminServices.push({ getServiceDefinition, getHandlers }); +} + +export function addAdminServicesToServer(server: Server): void { + for (const { getServiceDefinition, getHandlers } of registeredAdminServices) { + server.addService(getServiceDefinition(), getHandlers()); + } +} diff --git a/packages/grpc-js/src/backoff-timeout.ts b/packages/grpc-js/src/backoff-timeout.ts index 7f2ab5ebf..10d347e79 100644 --- a/packages/grpc-js/src/backoff-timeout.ts +++ b/packages/grpc-js/src/backoff-timeout.ts @@ -37,14 +37,52 @@ export interface BackoffOptions { } export class BackoffTimeout { - private initialDelay: number = INITIAL_BACKOFF_MS; - private multiplier: number = BACKOFF_MULTIPLIER; - private maxDelay: number = MAX_BACKOFF_MS; - private jitter: number = BACKOFF_JITTER; + /** + * The delay time at the start, and after each reset. + */ + private readonly initialDelay: number = INITIAL_BACKOFF_MS; + /** + * The exponential backoff multiplier. + */ + private readonly multiplier: number = BACKOFF_MULTIPLIER; + /** + * The maximum delay time + */ + private readonly maxDelay: number = MAX_BACKOFF_MS; + /** + * The maximum fraction by which the delay time can randomly vary after + * applying the multiplier. + */ + private readonly jitter: number = BACKOFF_JITTER; + /** + * The delay time for the next time the timer runs. + */ private nextDelay: number; - private timerId: NodeJS.Timer; + /** + * The handle of the underlying timer. If running is false, this value refers + * to an object representing a timer that has ended, but it can still be + * interacted with without error. + */ + private timerId: NodeJS.Timeout; + /** + * Indicates whether the timer is currently running. + */ private running = false; + /** + * Indicates whether the timer should keep the Node process running if no + * other async operation is doing so. + */ private hasRef = true; + /** + * The time that the currently running timer was started. Only valid if + * running is true. + */ + private startTime: Date = new Date(); + /** + * The approximate time that the currently running timer will end. Only valid + * if running is true. 
+ */ + private endTime: Date = new Date(); constructor(private callback: () => void, options?: BackoffOptions) { if (options) { @@ -66,18 +104,28 @@ export class BackoffTimeout { clearTimeout(this.timerId); } - /** - * Call the callback after the current amount of delay time - */ - runOnce() { - this.running = true; + private runTimer(delay: number) { + this.endTime = this.startTime; + this.endTime.setMilliseconds( + this.endTime.getMilliseconds() + this.nextDelay + ); + clearTimeout(this.timerId); this.timerId = setTimeout(() => { this.callback(); this.running = false; - }, this.nextDelay); + }, delay); if (!this.hasRef) { this.timerId.unref?.(); } + } + + /** + * Call the callback after the current amount of delay time + */ + runOnce() { + this.running = true; + this.startTime = new Date(); + this.runTimer(this.nextDelay); const nextBackoff = Math.min( this.nextDelay * this.multiplier, this.maxDelay @@ -97,23 +145,54 @@ export class BackoffTimeout { } /** - * Reset the delay time to its initial value. + * Reset the delay time to its initial value. If the timer is still running, + * retroactively apply that reset to the current timer. */ reset() { this.nextDelay = this.initialDelay; + if (this.running) { + const now = new Date(); + const newEndTime = this.startTime; + newEndTime.setMilliseconds(newEndTime.getMilliseconds() + this.nextDelay); + clearTimeout(this.timerId); + if (now < newEndTime) { + this.runTimer(newEndTime.getTime() - now.getTime()); + } else { + this.running = false; + } + } } + /** + * Check whether the timer is currently running. + */ isRunning() { return this.running; } + /** + * Set that while the timer is running, it should keep the Node process + * running. + */ ref() { this.hasRef = true; this.timerId.ref?.(); } + /** + * Set that while the timer is running, it should not keep the Node process + * running. + */ unref() { this.hasRef = false; this.timerId.unref?.(); } + + /** + * Get the approximate timestamp of when the timer will fire. Only valid if + * this.isRunning() is true. + */ + getEndTime() { + return this.endTime; + } } diff --git a/packages/grpc-js/src/call-credentials-filter.ts b/packages/grpc-js/src/call-credentials-filter.ts deleted file mode 100644 index 53bdba2f1..000000000 --- a/packages/grpc-js/src/call-credentials-filter.ts +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
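The admin.ts module introduced above keeps a process-wide registry of admin services: a service such as channelz can call registerAdminService with lazy getters, and addAdminServicesToServer later attaches every registered service to a Server. A minimal sketch of exposing those services follows, assuming addAdminServicesToServer is re-exported from the package entry point; the bind address is a placeholder.

```ts
// Sketch only: exposing the registered admin services on a server. The
// re-export of addAdminServicesToServer and the port are assumptions.
import * as grpc from '@grpc/grpc-js';

const server = new grpc.Server();
grpc.addAdminServicesToServer(server);

server.bindAsync(
  'localhost:50052',
  grpc.ServerCredentials.createInsecure(),
  (error, port) => {
    if (error) {
      console.error(`Failed to bind: ${error.message}`);
      return;
    }
    console.log(`Admin services listening on port ${port}`);
    server.start();
  }
);
```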
- * - */ - -import { Call } from './call-stream'; -import { Channel } from './channel'; -import { BaseFilter, Filter, FilterFactory } from './filter'; -import { Metadata } from './metadata'; -import { Status } from './constants'; -import { splitHostPort } from './uri-parser'; -import { ServiceError } from './call'; - -export class CallCredentialsFilter extends BaseFilter implements Filter { - private serviceUrl: string; - constructor( - private readonly channel: Channel, - private readonly stream: Call - ) { - super(); - this.channel = channel; - this.stream = stream; - const splitPath: string[] = stream.getMethod().split('/'); - let serviceName = ''; - /* The standard path format is "/{serviceName}/{methodName}", so if we split - * by '/', the first item should be empty and the second should be the - * service name */ - if (splitPath.length >= 2) { - serviceName = splitPath[1]; - } - const hostname = splitHostPort(stream.getHost())?.host ?? 'localhost'; - /* Currently, call credentials are only allowed on HTTPS connections, so we - * can assume that the scheme is "https" */ - this.serviceUrl = `https://${hostname}/${serviceName}`; - } - - async sendMetadata(metadata: Promise): Promise { - const credentials = this.stream.getCredentials(); - const credsMetadata = credentials.generateMetadata({ - service_url: this.serviceUrl, - }); - const resultMetadata = await metadata; - try { - resultMetadata.merge(await credsMetadata); - } catch (error) { - this.stream.cancelWithStatus( - Status.UNAUTHENTICATED, - `Failed to retrieve auth metadata with error: ${error.message}` - ); - return Promise.reject('Failed to retrieve auth metadata'); - } - if (resultMetadata.get('authorization').length > 1) { - this.stream.cancelWithStatus( - Status.INTERNAL, - '"authorization" metadata cannot have multiple values' - ); - return Promise.reject('"authorization" metadata cannot have multiple values'); - } - return resultMetadata; - } -} - -export class CallCredentialsFilterFactory - implements FilterFactory { - constructor(private readonly channel: Channel) { - this.channel = channel; - } - - createFilter(callStream: Call): CallCredentialsFilter { - return new CallCredentialsFilter(this.channel, callStream); - } -} diff --git a/packages/grpc-js/src/call-credentials.ts b/packages/grpc-js/src/call-credentials.ts index bbc88a895..b0013eeae 100644 --- a/packages/grpc-js/src/call-credentials.ts +++ b/packages/grpc-js/src/call-credentials.ts @@ -115,20 +115,24 @@ export abstract class CallCredentials { reject(err); return; } + if (!headers) { + reject(new Error('Headers not set by metadata plugin')); + return; + } resolve(headers); } ); }); } getHeaders.then( - (headers) => { + headers => { const metadata = new Metadata(); for (const key of Object.keys(headers)) { metadata.add(key, headers[key]); } callback(null, metadata); }, - (err) => { + err => { callback(err); } ); @@ -148,7 +152,7 @@ class ComposedCallCredentials extends CallCredentials { async generateMetadata(options: CallMetadataOptions): Promise { const base: Metadata = new Metadata(); const generated: Metadata[] = await Promise.all( - this.creds.map((cred) => cred.generateMetadata(options)) + this.creds.map(cred => cred.generateMetadata(options)) ); for (const gen of generated) { base.merge(gen); diff --git a/packages/grpc-js/src/call-interface.ts b/packages/grpc-js/src/call-interface.ts new file mode 100644 index 000000000..c93c504f6 --- /dev/null +++ b/packages/grpc-js/src/call-interface.ts @@ -0,0 +1,177 @@ +/* + * Copyright 2022 gRPC authors. 
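The call-credentials.ts change above adds a guard so that a metadata plugin that calls back with neither an error nor headers is rejected with "Headers not set by metadata plugin" instead of failing later. For context, here is a hedged sketch of the related public factory in the same module, createFromMetadataGenerator, which is the usual way to build per-call credentials; the bearer token is a placeholder.

```ts
// Sketch only: per-call credentials built with the public
// createFromMetadataGenerator factory. The bearer token is a placeholder.
import { credentials, Metadata } from '@grpc/grpc-js';

const callCreds = credentials.createFromMetadataGenerator(
  (options, callback) => {
    // options.service_url identifies the service being called.
    const metadata = new Metadata();
    metadata.add('authorization', 'Bearer <placeholder-token>');
    callback(null, metadata);
  }
);

// Per-call credentials are combined with channel-level TLS credentials:
const channelCreds = credentials.combineChannelCredentials(
  credentials.createSsl(),
  callCreds
);
```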
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import { CallCredentials } from './call-credentials'; +import { Status } from './constants'; +import { Deadline } from './deadline'; +import { Metadata } from './metadata'; +import { ServerSurfaceCall } from './server-call'; + +export interface CallStreamOptions { + deadline: Deadline; + flags: number; + host: string; + parentCall: ServerSurfaceCall | null; +} + +export type PartialCallStreamOptions = Partial; + +export interface StatusObject { + code: Status; + details: string; + metadata: Metadata; +} + +export type PartialStatusObject = Pick & { + metadata?: Metadata | null | undefined; +}; + +export const enum WriteFlags { + BufferHint = 1, + NoCompress = 2, + WriteThrough = 4, +} + +export interface WriteObject { + message: Buffer; + flags?: number; +} + +export interface MetadataListener { + (metadata: Metadata, next: (metadata: Metadata) => void): void; +} + +export interface MessageListener { + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (message: any, next: (message: any) => void): void; +} + +export interface StatusListener { + (status: StatusObject, next: (status: StatusObject) => void): void; +} + +export interface FullListener { + onReceiveMetadata: MetadataListener; + onReceiveMessage: MessageListener; + onReceiveStatus: StatusListener; +} + +export type Listener = Partial; + +/** + * An object with methods for handling the responses to a call. 
+ */ +export interface InterceptingListener { + onReceiveMetadata(metadata: Metadata): void; + // eslint-disable-next-line @typescript-eslint/no-explicit-any + onReceiveMessage(message: any): void; + onReceiveStatus(status: StatusObject): void; +} + +export function isInterceptingListener( + listener: Listener | InterceptingListener +): listener is InterceptingListener { + return ( + listener.onReceiveMetadata !== undefined && + listener.onReceiveMetadata.length === 1 + ); +} + +export class InterceptingListenerImpl implements InterceptingListener { + private processingMetadata = false; + private hasPendingMessage = false; + private pendingMessage: any; + private processingMessage = false; + private pendingStatus: StatusObject | null = null; + constructor( + private listener: FullListener, + private nextListener: InterceptingListener + ) {} + + private processPendingMessage() { + if (this.hasPendingMessage) { + this.nextListener.onReceiveMessage(this.pendingMessage); + this.pendingMessage = null; + this.hasPendingMessage = false; + } + } + + private processPendingStatus() { + if (this.pendingStatus) { + this.nextListener.onReceiveStatus(this.pendingStatus); + } + } + + onReceiveMetadata(metadata: Metadata): void { + this.processingMetadata = true; + this.listener.onReceiveMetadata(metadata, metadata => { + this.processingMetadata = false; + this.nextListener.onReceiveMetadata(metadata); + this.processPendingMessage(); + this.processPendingStatus(); + }); + } + // eslint-disable-next-line @typescript-eslint/no-explicit-any + onReceiveMessage(message: any): void { + /* If this listener processes messages asynchronously, the last message may + * be reordered with respect to the status */ + this.processingMessage = true; + this.listener.onReceiveMessage(message, msg => { + this.processingMessage = false; + if (this.processingMetadata) { + this.pendingMessage = msg; + this.hasPendingMessage = true; + } else { + this.nextListener.onReceiveMessage(msg); + this.processPendingStatus(); + } + }); + } + onReceiveStatus(status: StatusObject): void { + this.listener.onReceiveStatus(status, processedStatus => { + if (this.processingMetadata || this.processingMessage) { + this.pendingStatus = processedStatus; + } else { + this.nextListener.onReceiveStatus(processedStatus); + } + }); + } +} + +export interface WriteCallback { + (error?: Error | null): void; +} + +export interface MessageContext { + callback?: WriteCallback; + flags?: number; +} + +export interface Call { + cancelWithStatus(status: Status, details: string): void; + getPeer(): string; + start(metadata: Metadata, listener: InterceptingListener): void; + sendMessageWithContext(context: MessageContext, message: Buffer): void; + startRead(): void; + halfClose(): void; + getCallNumber(): number; + setCredentials(credentials: CallCredentials): void; +} + +export interface DeadlineInfoProvider { + getDeadlineInfo(): string[]; +} diff --git a/packages/grpc-js/src/call-number.ts b/packages/grpc-js/src/call-number.ts new file mode 100644 index 000000000..8c37d3f91 --- /dev/null +++ b/packages/grpc-js/src/call-number.ts @@ -0,0 +1,22 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
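call-interface.ts above defines the Listener and InterceptingListener shapes (onReceiveMetadata, onReceiveMessage, onReceiveStatus) shared by the client internals and interceptors. The sketch below shows a client interceptor that observes call status through the public interceptor API, which uses the same listener shape; the logging is purely illustrative, and the commented client class is hypothetical.

```ts
// Sketch only: a client interceptor that logs the final status of each call
// using the listener callbacks described above.
import { InterceptingCall, Interceptor } from '@grpc/grpc-js';

const statusLoggingInterceptor: Interceptor = (options, nextCall) => {
  return new InterceptingCall(nextCall(options), {
    start(metadata, listener, next) {
      next(metadata, {
        onReceiveStatus(status, nextStatus) {
          console.log(
            `${options.method_definition.path} finished with code ${status.code}`
          );
          nextStatus(status);
        },
      });
    },
  });
};

// The interceptor would be passed in a (hypothetical) client's options:
// new EchoClient(target, creds, { interceptors: [statusLoggingInterceptor] });
```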
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +let nextCallNumber = 0; + +export function getNextCallNumber() { + return nextCallNumber++; +} diff --git a/packages/grpc-js/src/call-stream.ts b/packages/grpc-js/src/call-stream.ts deleted file mode 100644 index 8d72bb3ee..000000000 --- a/packages/grpc-js/src/call-stream.ts +++ /dev/null @@ -1,792 +0,0 @@ -/* - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -import * as http2 from 'http2'; -import * as os from 'os'; - -import { CallCredentials } from './call-credentials'; -import { Propagate, Status } from './constants'; -import { Filter, FilterFactory } from './filter'; -import { FilterStackFactory, FilterStack } from './filter-stack'; -import { Metadata } from './metadata'; -import { StreamDecoder } from './stream-decoder'; -import { ChannelImplementation } from './channel'; -import { Subchannel } from './subchannel'; -import * as logging from './logging'; -import { LogVerbosity } from './constants'; -import { ServerSurfaceCall } from './server-call'; - -const TRACER_NAME = 'call_stream'; - -const { - HTTP2_HEADER_STATUS, - HTTP2_HEADER_CONTENT_TYPE, - NGHTTP2_CANCEL, -} = http2.constants; - -/** - * https://nodejs.org/api/errors.html#errors_class_systemerror - */ -interface SystemError extends Error { - address?: string; - code: string; - dest?: string; - errno: number; - info?: object; - message: string; - path?: string; - port?: number; - syscall: string; -} - -/** - * Should do approximately the same thing as util.getSystemErrorName but the - * TypeScript types don't have that function for some reason so I just made my - * own. 
- * @param errno - */ -function getSystemErrorName(errno: number): string { - for (const [name, num] of Object.entries(os.constants.errno)) { - if (num === errno) { - return name; - } - } - return 'Unknown system error ' + errno; -} - -export type Deadline = Date | number; - -export interface CallStreamOptions { - deadline: Deadline; - flags: number; - host: string; - parentCall: ServerSurfaceCall | null; -} - -export type PartialCallStreamOptions = Partial; - -export interface StatusObject { - code: Status; - details: string; - metadata: Metadata; -} - -export const enum WriteFlags { - BufferHint = 1, - NoCompress = 2, - WriteThrough = 4, -} - -export interface WriteObject { - message: Buffer; - flags?: number; -} - -export interface MetadataListener { - (metadata: Metadata, next: (metadata: Metadata) => void): void; -} - -export interface MessageListener { - // eslint-disable-next-line @typescript-eslint/no-explicit-any - (message: any, next: (message: any) => void): void; -} - -export interface StatusListener { - (status: StatusObject, next: (status: StatusObject) => void): void; -} - -export interface FullListener { - onReceiveMetadata: MetadataListener; - onReceiveMessage: MessageListener; - onReceiveStatus: StatusListener; -} - -export type Listener = Partial; - -/** - * An object with methods for handling the responses to a call. - */ -export interface InterceptingListener { - onReceiveMetadata(metadata: Metadata): void; - // eslint-disable-next-line @typescript-eslint/no-explicit-any - onReceiveMessage(message: any): void; - onReceiveStatus(status: StatusObject): void; -} - -export function isInterceptingListener( - listener: Listener | InterceptingListener -): listener is InterceptingListener { - return ( - listener.onReceiveMetadata !== undefined && - listener.onReceiveMetadata.length === 1 - ); -} - -export class InterceptingListenerImpl implements InterceptingListener { - private processingMessage = false; - private pendingStatus: StatusObject | null = null; - constructor( - private listener: FullListener, - private nextListener: InterceptingListener - ) {} - - onReceiveMetadata(metadata: Metadata): void { - this.listener.onReceiveMetadata(metadata, (metadata) => { - this.nextListener.onReceiveMetadata(metadata); - }); - } - // eslint-disable-next-line @typescript-eslint/no-explicit-any - onReceiveMessage(message: any): void { - /* If this listener processes messages asynchronously, the last message may - * be reordered with respect to the status */ - this.processingMessage = true; - this.listener.onReceiveMessage(message, (msg) => { - this.processingMessage = false; - this.nextListener.onReceiveMessage(msg); - if (this.pendingStatus) { - this.nextListener.onReceiveStatus(this.pendingStatus); - } - }); - } - onReceiveStatus(status: StatusObject): void { - this.listener.onReceiveStatus(status, (processedStatus) => { - if (this.processingMessage) { - this.pendingStatus = processedStatus; - } else { - this.nextListener.onReceiveStatus(processedStatus); - } - }); - } -} - -export interface WriteCallback { - (error?: Error | null): void; -} - -export interface MessageContext { - callback?: WriteCallback; - flags?: number; -} - -export interface Call { - cancelWithStatus(status: Status, details: string): void; - getPeer(): string; - start(metadata: Metadata, listener: InterceptingListener): void; - sendMessageWithContext(context: MessageContext, message: Buffer): void; - startRead(): void; - halfClose(): void; - - getDeadline(): Deadline; - getCredentials(): CallCredentials; - 
setCredentials(credentials: CallCredentials): void; - getMethod(): string; - getHost(): string; -} - -export class Http2CallStream implements Call { - credentials: CallCredentials; - filterStack: Filter; - private http2Stream: http2.ClientHttp2Stream | null = null; - private pendingRead = false; - private isWriteFilterPending = false; - private pendingWrite: Buffer | null = null; - private pendingWriteCallback: WriteCallback | null = null; - private writesClosed = false; - - private decoder = new StreamDecoder(); - - private isReadFilterPending = false; - private canPush = false; - /** - * Indicates that an 'end' event has come from the http2 stream, so there - * will be no more data events. - */ - private readsClosed = false; - - private statusOutput = false; - - private unpushedReadMessages: Buffer[] = []; - private unfilteredReadMessages: Buffer[] = []; - - // Status code mapped from :status. To be used if grpc-status is not received - private mappedStatusCode: Status = Status.UNKNOWN; - - // This is populated (non-null) if and only if the call has ended - private finalStatus: StatusObject | null = null; - - private subchannel: Subchannel | null = null; - private disconnectListener: () => void; - - private listener: InterceptingListener | null = null; - - private internalError: SystemError | null = null; - - constructor( - private readonly methodName: string, - private readonly channel: ChannelImplementation, - private readonly options: CallStreamOptions, - filterStackFactory: FilterStackFactory, - private readonly channelCallCredentials: CallCredentials, - private readonly callNumber: number - ) { - this.filterStack = filterStackFactory.createFilter(this); - this.credentials = channelCallCredentials; - this.disconnectListener = () => { - this.endCall({ - code: Status.UNAVAILABLE, - details: 'Connection dropped', - metadata: new Metadata(), - }); - }; - if (this.options.parentCall && this.options.flags & Propagate.CANCELLATION) { - this.options.parentCall.on('cancelled', () => { - this.cancelWithStatus(Status.CANCELLED, 'Cancelled by parent call'); - }); - } - } - - private outputStatus() { - /* Precondition: this.finalStatus !== null */ - if (!this.statusOutput) { - this.statusOutput = true; - const filteredStatus = this.filterStack.receiveTrailers( - this.finalStatus! - ); - /* We delay the actual action of bubbling up the status to insulate the - * cleanup code in this class from any errors that may be thrown in the - * upper layers as a result of bubbling up the status. In particular, - * if the status is not OK, the "error" event may be emitted - * synchronously at the top level, which will result in a thrown error if - * the user does not handle that event. */ - process.nextTick(() => { - this.listener?.onReceiveStatus(filteredStatus); - }); - if (this.subchannel) { - this.subchannel.callUnref(); - this.subchannel.removeDisconnectListener(this.disconnectListener); - } - } - } - - private trace(text: string): void { - logging.trace( - LogVerbosity.DEBUG, - TRACER_NAME, - '[' + this.callNumber + '] ' + text - ); - } - - /** - * On first call, emits a 'status' event with the given StatusObject. - * Subsequent calls are no-ops. - * @param status The status of the call. - */ - private endCall(status: StatusObject): void { - /* If the status is OK and a new status comes in (e.g. 
from a - * deserialization failure), that new status takes priority */ - if (this.finalStatus === null || this.finalStatus.code === Status.OK) { - this.trace( - 'ended with status: code=' + - status.code + - ' details="' + - status.details + - '"' - ); - this.finalStatus = status; - this.maybeOutputStatus(); - } - this.destroyHttp2Stream(); - } - - private maybeOutputStatus() { - if (this.finalStatus !== null) { - /* The combination check of readsClosed and that the two message buffer - * arrays are empty checks that there all incoming data has been fully - * processed */ - if ( - this.finalStatus.code !== Status.OK || - (this.readsClosed && - this.unpushedReadMessages.length === 0 && - this.unfilteredReadMessages.length === 0 && - !this.isReadFilterPending) - ) { - this.outputStatus(); - } - } - } - - private push(message: Buffer): void { - this.trace( - 'pushing to reader message of length ' + - (message instanceof Buffer ? message.length : null) - ); - this.canPush = false; - process.nextTick(() => { - /* If we have already output the status any later messages should be - * ignored, and can cause out-of-order operation errors higher up in the - * stack. Checking as late as possible here to avoid any race conditions. - */ - if (this.statusOutput) { - return; - } - this.listener?.onReceiveMessage(message); - this.maybeOutputStatus(); - }); - } - - private handleFilterError(error: Error) { - this.cancelWithStatus(Status.INTERNAL, error.message); - } - - private handleFilteredRead(message: Buffer) { - /* If we the call has already ended with an error, we don't want to do - * anything with this message. Dropping it on the floor is correct - * behavior */ - if (this.finalStatus !== null && this.finalStatus.code !== Status.OK) { - this.maybeOutputStatus(); - return; - } - this.isReadFilterPending = false; - if (this.canPush) { - this.http2Stream!.pause(); - this.push(message); - } else { - this.trace( - 'unpushedReadMessages.push message of length ' + message.length - ); - this.unpushedReadMessages.push(message); - } - if (this.unfilteredReadMessages.length > 0) { - /* nextMessage is guaranteed not to be undefined because - unfilteredReadMessages is non-empty */ - const nextMessage = this.unfilteredReadMessages.shift()!; - this.filterReceivedMessage(nextMessage); - } - } - - private filterReceivedMessage(framedMessage: Buffer) { - /* If we the call has already ended with an error, we don't want to do - * anything with this message. 
Dropping it on the floor is correct - * behavior */ - if (this.finalStatus !== null && this.finalStatus.code !== Status.OK) { - this.maybeOutputStatus(); - return; - } - this.trace('filterReceivedMessage of length ' + framedMessage.length); - this.isReadFilterPending = true; - this.filterStack - .receiveMessage(Promise.resolve(framedMessage)) - .then( - this.handleFilteredRead.bind(this), - this.handleFilterError.bind(this) - ); - } - - private tryPush(messageBytes: Buffer): void { - if (this.isReadFilterPending) { - this.trace( - 'unfilteredReadMessages.push message of length ' + - (messageBytes && messageBytes.length) - ); - this.unfilteredReadMessages.push(messageBytes); - } else { - this.filterReceivedMessage(messageBytes); - } - } - - private handleTrailers(headers: http2.IncomingHttpHeaders) { - let headersString = ''; - for (const header of Object.keys(headers)) { - headersString += '\t\t' + header + ': ' + headers[header] + '\n'; - } - this.trace('Received server trailers:\n' + headersString); - let metadata: Metadata; - try { - metadata = Metadata.fromHttp2Headers(headers); - } catch (e) { - metadata = new Metadata(); - } - const metadataMap = metadata.getMap(); - let code: Status = this.mappedStatusCode; - if ( - code === Status.UNKNOWN && - typeof metadataMap['grpc-status'] === 'string' - ) { - const receivedStatus = Number(metadataMap['grpc-status']); - if (receivedStatus in Status) { - code = receivedStatus; - this.trace('received status code ' + receivedStatus + ' from server'); - } - metadata.remove('grpc-status'); - } - let details = ''; - if (typeof metadataMap['grpc-message'] === 'string') { - details = decodeURI(metadataMap['grpc-message']); - metadata.remove('grpc-message'); - this.trace( - 'received status details string "' + details + '" from server' - ); - } - const status: StatusObject = { code, details, metadata }; - // This is a no-op if the call was already ended when handling headers. 
- this.endCall(status); - } - - attachHttp2Stream( - stream: http2.ClientHttp2Stream, - subchannel: Subchannel, - extraFilterFactory?: FilterFactory - ): void { - if (extraFilterFactory !== undefined) { - this.filterStack = new FilterStack([ - this.filterStack, - extraFilterFactory.createFilter(this), - ]); - } - if (this.finalStatus !== null) { - stream.close(NGHTTP2_CANCEL); - } else { - this.trace( - 'attachHttp2Stream from subchannel ' + subchannel.getAddress() - ); - this.http2Stream = stream; - this.subchannel = subchannel; - subchannel.addDisconnectListener(this.disconnectListener); - subchannel.callRef(); - stream.on('response', (headers, flags) => { - let headersString = ''; - for (const header of Object.keys(headers)) { - headersString += '\t\t' + header + ': ' + headers[header] + '\n'; - } - this.trace('Received server headers:\n' + headersString); - switch (headers[':status']) { - // TODO(murgatroid99): handle 100 and 101 - case 400: - this.mappedStatusCode = Status.INTERNAL; - break; - case 401: - this.mappedStatusCode = Status.UNAUTHENTICATED; - break; - case 403: - this.mappedStatusCode = Status.PERMISSION_DENIED; - break; - case 404: - this.mappedStatusCode = Status.UNIMPLEMENTED; - break; - case 429: - case 502: - case 503: - case 504: - this.mappedStatusCode = Status.UNAVAILABLE; - break; - default: - this.mappedStatusCode = Status.UNKNOWN; - } - - if (flags & http2.constants.NGHTTP2_FLAG_END_STREAM) { - this.handleTrailers(headers); - } else { - let metadata: Metadata; - try { - metadata = Metadata.fromHttp2Headers(headers); - } catch (error) { - this.endCall({ - code: Status.UNKNOWN, - details: error.message, - metadata: new Metadata(), - }); - return; - } - try { - const finalMetadata = this.filterStack.receiveMetadata(metadata); - this.listener?.onReceiveMetadata(finalMetadata); - } catch (error) { - this.endCall({ - code: Status.UNKNOWN, - details: error.message, - metadata: new Metadata(), - }); - } - } - }); - stream.on('trailers', this.handleTrailers.bind(this)); - stream.on('data', (data: Buffer) => { - this.trace('receive HTTP/2 data frame of length ' + data.length); - const messages = this.decoder.write(data); - - for (const message of messages) { - this.trace('parsed message of length ' + message.length); - this.tryPush(message); - } - }); - stream.on('end', () => { - this.readsClosed = true; - this.maybeOutputStatus(); - }); - stream.on('close', () => { - /* Use process.next tick to ensure that this code happens after any - * "error" event that may be emitted at about the same time, so that - * we can bubble up the error message from that event. 
*/ - process.nextTick(() => { - this.trace('HTTP/2 stream closed with code ' + stream.rstCode); - /* If we have a final status with an OK status code, that means that - * we have received all of the messages and we have processed the - * trailers and the call completed successfully, so it doesn't matter - * how the stream ends after that */ - if (this.finalStatus?.code === Status.OK) { - return; - } - let code: Status; - let details = ''; - switch (stream.rstCode) { - case http2.constants.NGHTTP2_NO_ERROR: - /* If we get a NO_ERROR code and we already have a status, the - * stream completed properly and we just haven't fully processed - * it yet */ - if (this.finalStatus !== null) { - return; - } - code = Status.INTERNAL; - details = `Received RST_STREAM with code ${stream.rstCode}`; - break; - case http2.constants.NGHTTP2_REFUSED_STREAM: - code = Status.UNAVAILABLE; - details = 'Stream refused by server'; - break; - case http2.constants.NGHTTP2_CANCEL: - code = Status.CANCELLED; - details = 'Call cancelled'; - break; - case http2.constants.NGHTTP2_ENHANCE_YOUR_CALM: - code = Status.RESOURCE_EXHAUSTED; - details = 'Bandwidth exhausted'; - break; - case http2.constants.NGHTTP2_INADEQUATE_SECURITY: - code = Status.PERMISSION_DENIED; - details = 'Protocol not secure enough'; - break; - case http2.constants.NGHTTP2_INTERNAL_ERROR: - code = Status.INTERNAL; - if (this.internalError === null) { - /* This error code was previously handled in the default case, and - * there are several instances of it online, so I wanted to - * preserve the original error message so that people find existing - * information in searches, but also include the more recognizable - * "Internal server error" message. */ - details = `Received RST_STREAM with code ${stream.rstCode} (Internal server error)`; - } else { - if (this.internalError.code === 'ECONNRESET') { - code = Status.UNAVAILABLE; - details = this.internalError.message; - } else { - /* The "Received RST_STREAM with code ..." error is preserved - * here for continuity with errors reported online, but the - * error message at the end will probably be more relevant in - * most cases. */ - details = `Received RST_STREAM with code ${stream.rstCode} triggered by internal client error: ${this.internalError.message}`; - } - } - break; - default: - code = Status.INTERNAL; - details = `Received RST_STREAM with code ${stream.rstCode}`; - } - // This is a no-op if trailers were received at all. - // This is OK, because status codes emitted here correspond to more - // catastrophic issues that prevent us from receiving trailers in the - // first place. - this.endCall({ code, details, metadata: new Metadata() }); - }); - }); - stream.on('error', (err: SystemError) => { - /* We need an error handler here to stop "Uncaught Error" exceptions - * from bubbling up. 
However, errors here should all correspond to - * "close" events, where we will handle the error more granularly */ - /* Specifically looking for stream errors that were *not* constructed - * from a RST_STREAM response here: - * https://github.com/nodejs/node/blob/8b8620d580314050175983402dfddf2674e8e22a/lib/internal/http2/core.js#L2267 - */ - if (err.code !== 'ERR_HTTP2_STREAM_ERROR') { - this.trace('Node error event: message=' + err.message + ' code=' + err.code + ' errno=' + getSystemErrorName(err.errno) + ' syscall=' + err.syscall); - this.internalError = err; - } - }); - if (!this.pendingRead) { - stream.pause(); - } - if (this.pendingWrite) { - if (!this.pendingWriteCallback) { - throw new Error('Invalid state in write handling code'); - } - this.trace( - 'sending data chunk of length ' + - this.pendingWrite.length + - ' (deferred)' - ); - try { - stream.write(this.pendingWrite, this.pendingWriteCallback); - } catch (error) { - this.endCall({ - code: Status.UNAVAILABLE, - details: `Write failed with error ${error.message}`, - metadata: new Metadata() - }); - } - } - this.maybeCloseWrites(); - } - } - - start(metadata: Metadata, listener: InterceptingListener) { - this.trace('Sending metadata'); - this.listener = listener; - this.channel._startCallStream(this, metadata); - } - - private destroyHttp2Stream() { - // The http2 stream could already have been destroyed if cancelWithStatus - // is called in response to an internal http2 error. - if (this.http2Stream !== null && !this.http2Stream.destroyed) { - /* If the call has ended with an OK status, communicate that when closing - * the stream, partly to avoid a situation in which we detect an error - * RST_STREAM as a result after we have the status */ - let code: number; - if (this.finalStatus?.code === Status.OK) { - code = http2.constants.NGHTTP2_NO_ERROR; - } else { - code = http2.constants.NGHTTP2_CANCEL; - } - this.trace('close http2 stream with code ' + code); - this.http2Stream.close(code); - } - } - - cancelWithStatus(status: Status, details: string): void { - this.trace( - 'cancelWithStatus code: ' + status + ' details: "' + details + '"' - ); - this.endCall({ code: status, details, metadata: new Metadata() }); - } - - getDeadline(): Deadline { - if (this.options.parentCall && this.options.flags & Propagate.DEADLINE) { - const parentDeadline = this.options.parentCall.getDeadline(); - const selfDeadline = this.options.deadline; - const parentDeadlineMsecs = parentDeadline instanceof Date ? parentDeadline.getTime() : parentDeadline; - const selfDeadlineMsecs = selfDeadline instanceof Date ? selfDeadline.getTime() : selfDeadline; - return Math.min(parentDeadlineMsecs, selfDeadlineMsecs); - } else { - return this.options.deadline; - } - } - - getCredentials(): CallCredentials { - return this.credentials; - } - - setCredentials(credentials: CallCredentials): void { - this.credentials = this.channelCallCredentials.compose(credentials); - } - - getStatus(): StatusObject | null { - return this.finalStatus; - } - - getPeer(): string { - return this.subchannel?.getAddress() ?? 
this.channel.getTarget(); - } - - getMethod(): string { - return this.methodName; - } - - getHost(): string { - return this.options.host; - } - - startRead() { - /* If the stream has ended with an error, we should not emit any more - * messages and we should communicate that the stream has ended */ - if (this.finalStatus !== null && this.finalStatus.code !== Status.OK) { - this.readsClosed = true; - this.maybeOutputStatus(); - return; - } - this.canPush = true; - if (this.http2Stream === null) { - this.pendingRead = true; - } else { - if (this.unpushedReadMessages.length > 0) { - const nextMessage: Buffer = this.unpushedReadMessages.shift()!; - this.push(nextMessage); - return; - } - /* Only resume reading from the http2Stream if we don't have any pending - * messages to emit */ - this.http2Stream.resume(); - } - } - - private maybeCloseWrites() { - if ( - this.writesClosed && - !this.isWriteFilterPending && - this.http2Stream !== null - ) { - this.trace('calling end() on HTTP/2 stream'); - this.http2Stream.end(); - } - } - - sendMessageWithContext(context: MessageContext, message: Buffer) { - this.trace('write() called with message of length ' + message.length); - const writeObj: WriteObject = { - message, - flags: context.flags, - }; - const cb: WriteCallback = context.callback ?? (() => {}); - this.isWriteFilterPending = true; - this.filterStack.sendMessage(Promise.resolve(writeObj)).then((message) => { - this.isWriteFilterPending = false; - if (this.http2Stream === null) { - this.trace( - 'deferring writing data chunk of length ' + message.message.length - ); - this.pendingWrite = message.message; - this.pendingWriteCallback = cb; - } else { - this.trace('sending data chunk of length ' + message.message.length); - try { - this.http2Stream.write(message.message, cb); - } catch (error) { - this.endCall({ - code: Status.UNAVAILABLE, - details: `Write failed with error ${error.message}`, - metadata: new Metadata() - }); - } - this.maybeCloseWrites(); - } - }, this.handleFilterError.bind(this)); - } - - halfClose() { - this.trace('end() called'); - this.writesClosed = true; - this.maybeCloseWrites(); - } -} diff --git a/packages/grpc-js/src/call.ts b/packages/grpc-js/src/call.ts index cfe37ecfb..a147c98bc 100644 --- a/packages/grpc-js/src/call.ts +++ b/packages/grpc-js/src/call.ts @@ -18,7 +18,7 @@ import { EventEmitter } from 'events'; import { Duplex, Readable, Writable } from 'stream'; -import { StatusObject, MessageContext } from './call-stream'; +import { StatusObject, MessageContext } from './call-interface'; import { Status } from './constants'; import { EmitterAugmentation1 } from './events'; import { Metadata } from './metadata'; @@ -65,10 +65,8 @@ export type ClientWritableStream = { /** * A type representing the return value of a bidirectional stream method call. */ -export type ClientDuplexStream< - RequestType, - ResponseType -> = ClientWritableStream & ClientReadableStream; +export type ClientDuplexStream = + ClientWritableStream & ClientReadableStream; /** * Construct a ServiceError from a StatusObject. This function exists primarily @@ -76,13 +74,20 @@ export type ClientDuplexStream< * error is not necessarily a problem in gRPC itself. 
* @param status */ -export function callErrorFromStatus(status: StatusObject): ServiceError { +export function callErrorFromStatus( + status: StatusObject, + callerStack: string +): ServiceError { const message = `${status.code} ${Status[status.code]}: ${status.details}`; - return Object.assign(new Error(message), status); + const error = new Error(message); + const stack = `${error.stack}\nfor call at\n${callerStack}`; + return Object.assign(new Error(message), status, { stack }); } -export class ClientUnaryCallImpl extends EventEmitter - implements ClientUnaryCall { +export class ClientUnaryCallImpl + extends EventEmitter + implements ClientUnaryCall +{ public call?: InterceptingCallInterface; constructor() { super(); @@ -97,8 +102,10 @@ export class ClientUnaryCallImpl extends EventEmitter } } -export class ClientReadableStreamImpl extends Readable - implements ClientReadableStream { +export class ClientReadableStreamImpl + extends Readable + implements ClientReadableStream +{ public call?: InterceptingCallInterface; constructor(readonly deserialize: (chunk: Buffer) => ResponseType) { super({ objectMode: true }); @@ -117,8 +124,10 @@ export class ClientReadableStreamImpl extends Readable } } -export class ClientWritableStreamImpl extends Writable - implements ClientWritableStream { +export class ClientWritableStreamImpl + extends Writable + implements ClientWritableStream +{ public call?: InterceptingCallInterface; constructor(readonly serialize: (value: RequestType) => Buffer) { super({ objectMode: true }); @@ -149,8 +158,10 @@ export class ClientWritableStreamImpl extends Writable } } -export class ClientDuplexStreamImpl extends Duplex - implements ClientDuplexStream { +export class ClientDuplexStreamImpl + extends Duplex + implements ClientDuplexStream +{ public call?: InterceptingCallInterface; constructor( readonly serialize: (value: RequestType) => Buffer, diff --git a/packages/grpc-js/src/channel-credentials.ts b/packages/grpc-js/src/channel-credentials.ts index 675e91628..2ed18507f 100644 --- a/packages/grpc-js/src/channel-credentials.ts +++ b/packages/grpc-js/src/channel-credentials.ts @@ -15,7 +15,12 @@ * */ -import { ConnectionOptions, createSecureContext, PeerCertificate } from 'tls'; +import { + ConnectionOptions, + createSecureContext, + PeerCertificate, + SecureContext, +} from 'tls'; import { CallCredentials } from './call-credentials'; import { CIPHER_SUITES, getDefaultRootsData } from './tls-helpers'; @@ -27,16 +32,6 @@ function verifyIsBufferOrNull(obj: any, friendlyName: string): void { } } -/** - * A certificate as received by the checkServerIdentity callback. - */ -export interface Certificate { - /** - * The raw certificate in DER form. - */ - raw: Buffer; -} - /** * A callback that will receive the expected hostname and presented peer * certificate as parameters. The callback should return an error to @@ -45,17 +40,9 @@ export interface Certificate { */ export type CheckServerIdentityCallback = ( hostname: string, - cert: Certificate + cert: PeerCertificate ) => Error | undefined; -function bufferOrNullEqual(buf1: Buffer | null, buf2: Buffer | null) { - if (buf1 === null && buf2 === null) { - return true; - } else { - return buf1 !== null && buf2 !== null && buf1.equals(buf2); - } -} - /** * Additional peer verification options that can be set when creating * SSL credentials. @@ -120,6 +107,7 @@ export abstract class ChannelCredentials { * @param rootCerts The root certificate data. * @param privateKey The client certificate private key, if available. 
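callErrorFromStatus above now appends the caller's stack to the error it builds, so a ServiceError reaching application code carries a "for call at" section pointing back at the call site as well as the internal completion path. A hedged sketch of handling such an error on a unary call; the client type and the echo method are placeholders for any generated unary method.

```ts
// Sketch only: inspecting a ServiceError from a unary call. The client type
// and method name are placeholders.
import { ServiceError, status } from '@grpc/grpc-js';

function callEcho(client: any): void {
  client.echo(
    { message: 'hello' },
    (error: ServiceError | null, response: unknown) => {
      if (error) {
        // error.message looks like "14 UNAVAILABLE: <details>", and
        // error.stack now ends with a "for call at" section pointing here.
        console.error(`code=${error.code} (${status[error.code]})`);
        console.error(`details=${error.details}`);
        console.error(error.stack);
        return;
      }
      console.log(response);
    }
  );
}
```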
* @param certChain The client certificate key chain, if available. + * @param verifyOptions Additional options to modify certificate verification */ static createSsl( rootCerts?: Buffer | null, @@ -140,12 +128,30 @@ export abstract class ChannelCredentials { 'Certificate chain must be given with accompanying private key' ); } - return new SecureChannelCredentialsImpl( - rootCerts || getDefaultRootsData(), - privateKey || null, - certChain || null, - verifyOptions || {} - ); + const secureContext = createSecureContext({ + ca: rootCerts ?? getDefaultRootsData() ?? undefined, + key: privateKey ?? undefined, + cert: certChain ?? undefined, + ciphers: CIPHER_SUITES, + }); + return new SecureChannelCredentialsImpl(secureContext, verifyOptions ?? {}); + } + + /** + * Return a new ChannelCredentials instance with credentials created using + * the provided secureContext. The resulting instances can be used to + * construct a Channel that communicates over TLS. gRPC will not override + * anything in the provided secureContext, so the environment variables + * GRPC_SSL_CIPHER_SUITES and GRPC_DEFAULT_SSL_ROOTS_FILE_PATH will + * not be applied. + * @param secureContext The return value of tls.createSecureContext() + * @param verifyOptions Additional options to modify certificate verification + */ + static createFromSecureContext( + secureContext: SecureContext, + verifyOptions?: VerifyOptions + ): ChannelCredentials { + return new SecureChannelCredentialsImpl(secureContext, verifyOptions ?? {}); } /** @@ -157,11 +163,11 @@ export abstract class ChannelCredentials { } class InsecureChannelCredentialsImpl extends ChannelCredentials { - constructor(callCredentials?: CallCredentials) { - super(callCredentials); + constructor() { + super(); } - compose(callCredentials: CallCredentials): ChannelCredentials { + compose(callCredentials: CallCredentials): never { throw new Error('Cannot compose insecure credentials'); } @@ -180,33 +186,23 @@ class SecureChannelCredentialsImpl extends ChannelCredentials { connectionOptions: ConnectionOptions; constructor( - private rootCerts: Buffer | null, - private privateKey: Buffer | null, - private certChain: Buffer | null, + private secureContext: SecureContext, private verifyOptions: VerifyOptions ) { super(); - const secureContext = createSecureContext({ - ca: rootCerts || undefined, - key: privateKey || undefined, - cert: certChain || undefined, - ciphers: CIPHER_SUITES, - }); - this.connectionOptions = { secureContext }; - if (verifyOptions && verifyOptions.checkServerIdentity) { - this.connectionOptions.checkServerIdentity = ( - host: string, - cert: PeerCertificate - ) => { - return verifyOptions.checkServerIdentity!(host, { raw: cert.raw }); - }; + this.connectionOptions = { + secureContext, + }; + // Node asserts that this option is a function, so we cannot pass undefined + if (verifyOptions?.checkServerIdentity) { + this.connectionOptions.checkServerIdentity = + verifyOptions.checkServerIdentity; } } compose(callCredentials: CallCredentials): ChannelCredentials { - const combinedCallCredentials = this.callCredentials.compose( - callCredentials - ); + const combinedCallCredentials = + this.callCredentials.compose(callCredentials); return new ComposedChannelCredentialsImpl(this, combinedCallCredentials); } @@ -222,18 +218,10 @@ class SecureChannelCredentialsImpl extends ChannelCredentials { return true; } if (other instanceof SecureChannelCredentialsImpl) { - if (!bufferOrNullEqual(this.rootCerts, other.rootCerts)) { - return false; - } - if 
(!bufferOrNullEqual(this.privateKey, other.privateKey)) { - return false; - } - if (!bufferOrNullEqual(this.certChain, other.certChain)) { - return false; - } return ( + this.secureContext === other.secureContext && this.verifyOptions.checkServerIdentity === - other.verifyOptions.checkServerIdentity + other.verifyOptions.checkServerIdentity ); } else { return false; @@ -249,9 +237,8 @@ class ComposedChannelCredentialsImpl extends ChannelCredentials { super(callCreds); } compose(callCredentials: CallCredentials) { - const combinedCallCredentials = this.callCredentials.compose( - callCredentials - ); + const combinedCallCredentials = + this.callCredentials.compose(callCredentials); return new ComposedChannelCredentialsImpl( this.channelCredentials, combinedCallCredentials diff --git a/packages/grpc-js/src/channel-options.ts b/packages/grpc-js/src/channel-options.ts index ebb724b0e..6804852e2 100644 --- a/packages/grpc-js/src/channel-options.ts +++ b/packages/grpc-js/src/channel-options.ts @@ -15,6 +15,8 @@ * */ +import { CompressionAlgorithms } from './compression-algorithms'; + /** * An interface that contains options used when initializing a Channel instance. */ @@ -34,9 +36,34 @@ export interface ChannelOptions { 'grpc.max_send_message_length'?: number; 'grpc.max_receive_message_length'?: number; 'grpc.enable_http_proxy'?: number; + /* http_connect_target and http_connect_creds are used for passing data + * around internally, and should not be documented as public-facing options + */ 'grpc.http_connect_target'?: string; 'grpc.http_connect_creds'?: string; + 'grpc.default_compression_algorithm'?: CompressionAlgorithms; + 'grpc.enable_channelz'?: number; + 'grpc.dns_min_time_between_resolutions_ms'?: number; + 'grpc.enable_retries'?: number; + 'grpc.per_rpc_retry_buffer_size'?: number; + /* This option is pattered like a core option, but the core does not have + * this option. It is closely related to the option + * grpc.per_rpc_retry_buffer_size, which is in the core. The core will likely + * implement this functionality using the ResourceQuota mechanism, so there + * will probably not be any collision or other inconsistency. 
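The channel-credentials.ts change above moves SecureContext construction into createSsl and adds createFromSecureContext, which wraps a caller-provided tls.SecureContext and applies neither GRPC_SSL_CIPHER_SUITES nor GRPC_DEFAULT_SSL_ROOTS_FILE_PATH. A sketch of using it with client certificates; the certificate file paths are placeholders.

```ts
// Sketch only: building channel credentials from a caller-managed
// SecureContext. The certificate file paths are placeholders.
import * as fs from 'fs';
import { createSecureContext } from 'tls';
import { ChannelCredentials } from '@grpc/grpc-js';

const secureContext = createSecureContext({
  ca: fs.readFileSync('ca.pem'),
  key: fs.readFileSync('client-key.pem'),
  cert: fs.readFileSync('client-cert.pem'),
  // Nothing here is overridden by gRPC: ciphers and roots are entirely the
  // caller's responsibility with this factory.
});

const channelCreds = ChannelCredentials.createFromSecureContext(secureContext, {
  // Optional additional verification, same shape as for createSsl().
  checkServerIdentity: (hostname, cert) => {
    console.log(`Certificate ${cert.fingerprint} presented for ${hostname}`);
    return undefined; // return an Error instead to reject the connection
  },
});
```

The resulting credentials can then be passed to a client constructor wherever createSsl() would otherwise be used.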
*/ + 'grpc.retry_buffer_size'?: number; + 'grpc.max_connection_age_ms'?: number; + 'grpc.max_connection_age_grace_ms'?: number; + 'grpc.max_connection_idle_ms'?: number; 'grpc-node.max_session_memory'?: number; + 'grpc.service_config_disable_resolution'?: number; + 'grpc.client_idle_timeout_ms'?: number; + /** + * Set the enableTrace option in TLS clients and servers + */ + 'grpc-node.tls_enable_trace'?: number; + 'grpc.lb.ring_hash.ring_size_cap'?: number; + // eslint-disable-next-line @typescript-eslint/no-explicit-any [key: string]: any; } @@ -60,7 +87,18 @@ export const recognizedOptions = { 'grpc.max_send_message_length': true, 'grpc.max_receive_message_length': true, 'grpc.enable_http_proxy': true, + 'grpc.enable_channelz': true, + 'grpc.dns_min_time_between_resolutions_ms': true, + 'grpc.enable_retries': true, + 'grpc.per_rpc_retry_buffer_size': true, + 'grpc.retry_buffer_size': true, + 'grpc.max_connection_age_ms': true, + 'grpc.max_connection_age_grace_ms': true, 'grpc-node.max_session_memory': true, + 'grpc.service_config_disable_resolution': true, + 'grpc.client_idle_timeout_ms': true, + 'grpc-node.tls_enable_trace': true, + 'grpc.lb.ring_hash.ring_size_cap': true, }; export function channelOptionsEqual( diff --git a/packages/grpc-js/src/channel.ts b/packages/grpc-js/src/channel.ts index 41715c41e..514920c8f 100644 --- a/packages/grpc-js/src/channel.ts +++ b/packages/grpc-js/src/channel.ts @@ -15,56 +15,15 @@ * */ -import { - Deadline, - Call, - Http2CallStream, - CallStreamOptions, -} from './call-stream'; import { ChannelCredentials } from './channel-credentials'; import { ChannelOptions } from './channel-options'; -import { ResolvingLoadBalancer } from './resolving-load-balancer'; -import { SubchannelPool, getSubchannelPool } from './subchannel-pool'; -import { ChannelControlHelper } from './load-balancer'; -import { UnavailablePicker, Picker, PickResultType } from './picker'; -import { Metadata } from './metadata'; -import { Status, LogVerbosity, Propagate } from './constants'; -import { FilterStackFactory } from './filter-stack'; -import { CallCredentialsFilterFactory } from './call-credentials-filter'; -import { DeadlineFilterFactory } from './deadline-filter'; -import { CompressionFilterFactory } from './compression-filter'; -import { CallConfig, ConfigSelector, getDefaultAuthority, mapUriDefaultScheme } from './resolver'; -import { trace, log } from './logging'; -import { SubchannelAddress } from './subchannel'; -import { MaxMessageSizeFilterFactory } from './max-message-size-filter'; -import { mapProxyName } from './http_proxy'; -import { GrpcUri, parseUri, uriToString } from './uri-parser'; import { ServerSurfaceCall } from './server-call'; -import { SurfaceCall } from './call'; -export enum ConnectivityState { - IDLE, - CONNECTING, - READY, - TRANSIENT_FAILURE, - SHUTDOWN, -} - -/** - * See https://nodejs.org/api/timers.html#timers_setinterval_callback_delay_args - */ -const MAX_TIMEOUT_TIME = 2147483647; - -let nextCallNumber = 0; - -function getNewCallNumber(): number { - const callNumber = nextCallNumber; - nextCallNumber += 1; - if (nextCallNumber >= Number.MAX_SAFE_INTEGER) { - nextCallNumber = 0; - } - return callNumber; -} +import { ConnectivityState } from './connectivity-state'; +import type { ChannelRef } from './channelz'; +import { Call } from './call-interface'; +import { InternalChannel } from './internal-channel'; +import { Deadline } from './deadline'; /** * An interface that represents a communication channel to a server specified @@ -104,6 +63,12 @@ 
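channel-options.ts above extends ChannelOptions with options such as grpc.enable_channelz, grpc.enable_retries, and grpc.client_idle_timeout_ms. A sketch of supplying a few of them when constructing a client; the values and the target address are arbitrary examples.

```ts
// Sketch only: supplying channel options at client construction time. The
// values and the target address are arbitrary examples.
import { ChannelOptions, Client, credentials } from '@grpc/grpc-js';

const options: ChannelOptions = {
  'grpc.keepalive_time_ms': 120000,
  'grpc.keepalive_timeout_ms': 20000,
  'grpc.enable_retries': 1,
  'grpc.enable_channelz': 1,
  'grpc.client_idle_timeout_ms': 300000,
};

// Generated clients (and the generic Client class) accept these options as
// the third constructor argument.
const client = new Client(
  'localhost:50051',
  credentials.createInsecure(),
  options
);
```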
export interface Channel { deadline: Date | number, callback: (error?: Error) => void ): void; + /** + * Get the channelz reference object for this channel. A request to the + * channelz service for the id in this object will provide information + * about this channel. + */ + getChannelzRef(): ChannelRef; /** * Create a call object. Call is an opaque type that is used by the Client * class. This function is called by the gRPC library when starting a @@ -125,47 +90,13 @@ export interface Channel { ): Call; } -interface ConnectivityStateWatcher { - currentState: ConnectivityState; - timer: NodeJS.Timeout | null; - callback: (error?: Error) => void; -} - export class ChannelImplementation implements Channel { - private resolvingLoadBalancer: ResolvingLoadBalancer; - private subchannelPool: SubchannelPool; - private connectivityState: ConnectivityState = ConnectivityState.IDLE; - private currentPicker: Picker = new UnavailablePicker(); - /** - * Calls queued up to get a call config. Should only be populated before the - * first time the resolver returns a result, which includes the ConfigSelector. - */ - private configSelectionQueue: Array<{ - callStream: Http2CallStream; - callMetadata: Metadata; - }> = []; - private pickQueue: Array<{ - callStream: Http2CallStream; - callMetadata: Metadata; - callConfig: CallConfig; - }> = []; - private connectivityStateWatchers: ConnectivityStateWatcher[] = []; - private defaultAuthority: string; - private filterStackFactory: FilterStackFactory; - private target: GrpcUri; - /** - * This timer does not do anything on its own. Its purpose is to hold the - * event loop open while there are any pending calls for the channel that - * have not yet been assigned to specific subchannels. In other words, - * the invariant is that callRefTimer is reffed if and only if pickQueue - * is non-empty. 
- */ - private callRefTimer: NodeJS.Timer; - private configSelector: ConfigSelector | null = null; + private internalChannel: InternalChannel; + constructor( target: string, - private readonly credentials: ChannelCredentials, - private readonly options: ChannelOptions + credentials: ChannelCredentials, + options: ChannelOptions ) { if (typeof target !== 'string') { throw new TypeError('Channel target must be a string'); @@ -176,382 +107,24 @@ export class ChannelImplementation implements Channel { ); } if (options) { - if ( - typeof options !== 'object' || - !Object.values(options).every( - (value) => - typeof value === 'string' || - typeof value === 'number' || - typeof value === 'undefined' - ) - ) { - throw new TypeError( - 'Channel options must be an object with string or number values' - ); - } - } - const originalTargetUri = parseUri(target); - if (originalTargetUri === null) { - throw new Error(`Could not parse target name "${target}"`); - } - /* This ensures that the target has a scheme that is registered with the - * resolver */ - const defaultSchemeMapResult = mapUriDefaultScheme(originalTargetUri); - if (defaultSchemeMapResult === null) { - throw new Error( - `Could not find a default scheme for target name "${target}"` - ); - } - - this.callRefTimer = setInterval(() => {}, MAX_TIMEOUT_TIME); - this.callRefTimer.unref?.(); - - if (this.options['grpc.default_authority']) { - this.defaultAuthority = this.options['grpc.default_authority'] as string; - } else { - this.defaultAuthority = getDefaultAuthority(defaultSchemeMapResult); - } - const proxyMapResult = mapProxyName(defaultSchemeMapResult, options); - this.target = proxyMapResult.target; - this.options = Object.assign({}, this.options, proxyMapResult.extraOptions); - - /* The global boolean parameter to getSubchannelPool has the inverse meaning to what - * the grpc.use_local_subchannel_pool channel option means. */ - this.subchannelPool = getSubchannelPool( - (options['grpc.use_local_subchannel_pool'] ?? 0) === 0 - ); - const channelControlHelper: ChannelControlHelper = { - createSubchannel: ( - subchannelAddress: SubchannelAddress, - subchannelArgs: ChannelOptions - ) => { - return this.subchannelPool.getOrCreateSubchannel( - this.target, - subchannelAddress, - Object.assign({}, this.options, subchannelArgs), - this.credentials - ); - }, - updateState: (connectivityState: ConnectivityState, picker: Picker) => { - this.currentPicker = picker; - const queueCopy = this.pickQueue.slice(); - this.pickQueue = []; - this.callRefTimerUnref(); - for (const { callStream, callMetadata, callConfig } of queueCopy) { - this.tryPick(callStream, callMetadata, callConfig); - } - this.updateState(connectivityState); - }, - requestReresolution: () => { - // This should never be called. - throw new Error( - 'Resolving load balancer should never call requestReresolution' - ); - }, - }; - this.resolvingLoadBalancer = new ResolvingLoadBalancer( - this.target, - channelControlHelper, - options, - (configSelector) => { - this.configSelector = configSelector; - /* We process the queue asynchronously to ensure that the corresponding - * load balancer update has completed. 
*/ - process.nextTick(() => { - const localQueue = this.configSelectionQueue; - this.configSelectionQueue = []; - this.callRefTimerUnref() - for (const {callStream, callMetadata} of localQueue) { - this.tryGetConfig(callStream, callMetadata); - } - this.configSelectionQueue = []; - }); - }, - (status) => { - if (this.configSelectionQueue.length > 0) { - trace(LogVerbosity.DEBUG, 'channel', 'Name resolution failed for target ' + uriToString(this.target) + ' with calls queued for config selection'); - } - const localQueue = this.configSelectionQueue; - this.configSelectionQueue = []; - this.callRefTimerUnref(); - for (const {callStream, callMetadata} of localQueue) { - if (callMetadata.getOptions().waitForReady) { - this.callRefTimerRef(); - this.configSelectionQueue.push({callStream, callMetadata}); - } else { - callStream.cancelWithStatus(status.code, status.details); - } - } - } - ); - this.filterStackFactory = new FilterStackFactory([ - new CallCredentialsFilterFactory(this), - new DeadlineFilterFactory(this), - new MaxMessageSizeFilterFactory(this.options), - new CompressionFilterFactory(this), - ]); - } - - private callRefTimerRef() { - // If the hasRef function does not exist, always run the code - if (!this.callRefTimer.hasRef?.()) { - trace(LogVerbosity.DEBUG, 'channel', 'callRefTimer.ref | configSelectionQueue.length=' + this.configSelectionQueue.length + ' pickQueue.length=' + this.pickQueue.length); - this.callRefTimer.ref?.(); - } - } - - private callRefTimerUnref() { - // If the hasRef function does not exist, always run the code - if ((!this.callRefTimer.hasRef) || (this.callRefTimer.hasRef())) { - trace(LogVerbosity.DEBUG, 'channel', 'callRefTimer.unref | configSelectionQueue.length=' + this.configSelectionQueue.length + ' pickQueue.length=' + this.pickQueue.length); - this.callRefTimer.unref?.(); - } - } - - private pushPick(callStream: Http2CallStream, callMetadata: Metadata, callConfig: CallConfig) { - this.pickQueue.push({ callStream, callMetadata, callConfig }); - this.callRefTimerRef(); - } - - /** - * Check the picker output for the given call and corresponding metadata, - * and take any relevant actions. Should not be called while iterating - * over pickQueue. - * @param callStream - * @param callMetadata - */ - private tryPick(callStream: Http2CallStream, callMetadata: Metadata, callConfig: CallConfig) { - const pickResult = this.currentPicker.pick({ metadata: callMetadata, extraPickInfo: callConfig.pickInformation }); - trace( - LogVerbosity.DEBUG, - 'channel', - 'Pick result: ' + - PickResultType[pickResult.pickResultType] + - ' subchannel: ' + - pickResult.subchannel?.getAddress() + - ' status: ' + - pickResult.status?.code + - ' ' + - pickResult.status?.details - ); - switch (pickResult.pickResultType) { - case PickResultType.COMPLETE: - if (pickResult.subchannel === null) { - callStream.cancelWithStatus( - Status.UNAVAILABLE, - 'Request dropped by load balancing policy' - ); - // End the call with an error - } else { - /* If the subchannel is not in the READY state, that indicates a bug - * somewhere in the load balancer or picker. So, we log an error and - * queue the pick to be tried again later. 
*/ - if ( - pickResult.subchannel!.getConnectivityState() !== - ConnectivityState.READY - ) { - log( - LogVerbosity.ERROR, - 'Error: COMPLETE pick result subchannel ' + - pickResult.subchannel!.getAddress() + - ' has state ' + - ConnectivityState[pickResult.subchannel!.getConnectivityState()] - ); - this.pushPick(callStream, callMetadata, callConfig); - break; - } - /* We need to clone the callMetadata here because the transparent - * retry code in the promise resolution handler use the same - * callMetadata object, so it needs to stay unmodified */ - callStream.filterStack - .sendMetadata(Promise.resolve(callMetadata.clone())) - .then( - (finalMetadata) => { - const subchannelState: ConnectivityState = pickResult.subchannel!.getConnectivityState(); - if (subchannelState === ConnectivityState.READY) { - try { - pickResult.subchannel!.startCallStream( - finalMetadata, - callStream, - pickResult.extraFilterFactory ?? undefined - ); - /* If we reach this point, the call stream has started - * successfully */ - callConfig.onCommitted?.(); - pickResult.onCallStarted?.(); - } catch (error) { - if ( - (error as NodeJS.ErrnoException).code === - 'ERR_HTTP2_GOAWAY_SESSION' - ) { - /* An error here indicates that something went wrong with - * the picked subchannel's http2 stream right before we - * tried to start the stream. We are handling a promise - * result here, so this is asynchronous with respect to the - * original tryPick call, so calling it again is not - * recursive. We call tryPick immediately instead of - * queueing this pick again because handling the queue is - * triggered by state changes, and we want to immediately - * check if the state has already changed since the - * previous tryPick call. We do this instead of cancelling - * the stream because the correct behavior may be - * re-queueing instead, based on the logic in the rest of - * tryPick */ - trace( - LogVerbosity.INFO, - 'channel', - 'Failed to start call on picked subchannel ' + - pickResult.subchannel!.getAddress() + - ' with error ' + - (error as Error).message + - '. Retrying pick' - ); - this.tryPick(callStream, callMetadata, callConfig); - } else { - trace( - LogVerbosity.INFO, - 'channel', - 'Failed to start call on picked subchanel ' + - pickResult.subchannel!.getAddress() + - ' with error ' + - (error as Error).message + - '. Ending call' - ); - callStream.cancelWithStatus( - Status.INTERNAL, - `Failed to start HTTP/2 stream with error: ${(error as Error).message}` - ); - } - } - } else { - /* The logic for doing this here is the same as in the catch - * block above */ - trace( - LogVerbosity.INFO, - 'channel', - 'Picked subchannel ' + - pickResult.subchannel!.getAddress() + - ' has state ' + - ConnectivityState[subchannelState] + - ' after metadata filters. Retrying pick' - ); - this.tryPick(callStream, callMetadata, callConfig); - } - }, - (error: Error & { code: number }) => { - // We assume the error code isn't 0 (Status.OK) - callStream.cancelWithStatus( - (typeof error.code === 'number') ? 
error.code : Status.UNKNOWN, - `Getting metadata from plugin failed with error: ${error.message}` - ); - } - ); - } - break; - case PickResultType.QUEUE: - this.pushPick(callStream, callMetadata, callConfig); - break; - case PickResultType.TRANSIENT_FAILURE: - if (callMetadata.getOptions().waitForReady) { - this.pushPick(callStream, callMetadata, callConfig); - } else { - callStream.cancelWithStatus( - pickResult.status!.code, - pickResult.status!.details - ); - } - break; - case PickResultType.DROP: - callStream.cancelWithStatus( - pickResult.status!.code, - pickResult.status!.details - ); - break; - default: - throw new Error( - `Invalid state: unknown pickResultType ${pickResult.pickResultType}` - ); - } - } - - private removeConnectivityStateWatcher( - watcherObject: ConnectivityStateWatcher - ) { - const watcherIndex = this.connectivityStateWatchers.findIndex( - (value) => value === watcherObject - ); - if (watcherIndex >= 0) { - this.connectivityStateWatchers.splice(watcherIndex, 1); - } - } - - private updateState(newState: ConnectivityState): void { - trace( - LogVerbosity.DEBUG, - 'connectivity_state', - uriToString(this.target) + - ' ' + - ConnectivityState[this.connectivityState] + - ' -> ' + - ConnectivityState[newState] - ); - this.connectivityState = newState; - const watchersCopy = this.connectivityStateWatchers.slice(); - for (const watcherObject of watchersCopy) { - if (newState !== watcherObject.currentState) { - if(watcherObject.timer) { - clearTimeout(watcherObject.timer); - } - this.removeConnectivityStateWatcher(watcherObject); - watcherObject.callback(); + if (typeof options !== 'object') { + throw new TypeError('Channel options must be an object'); } } - } - - private tryGetConfig(stream: Http2CallStream, metadata: Metadata) { - if (this.configSelector === null) { - /* This branch will only be taken at the beginning of the channel's life, - * before the resolver ever returns a result. So, the - * ResolvingLoadBalancer may be idle and if so it needs to be kicked - * because it now has a pending request. 
*/ - this.resolvingLoadBalancer.exitIdle(); - this.configSelectionQueue.push({ - callStream: stream, - callMetadata: metadata - }); - this.callRefTimerRef(); - } else { - const callConfig = this.configSelector(stream.getMethod(), metadata); - if (callConfig.status === Status.OK) { - this.tryPick(stream, metadata, callConfig); - } else { - stream.cancelWithStatus(callConfig.status, "Failed to route call to method " + stream.getMethod()); - } - } - } - _startCallStream(stream: Http2CallStream, metadata: Metadata) { - this.tryGetConfig(stream, metadata.clone()); + this.internalChannel = new InternalChannel(target, credentials, options); } close() { - this.resolvingLoadBalancer.destroy(); - this.updateState(ConnectivityState.SHUTDOWN); - clearInterval(this.callRefTimer); - - this.subchannelPool.unrefUnusedSubchannels(); + this.internalChannel.close(); } getTarget() { - return uriToString(this.target); + return this.internalChannel.getTarget(); } getConnectivityState(tryToConnect: boolean) { - const connectivityState = this.connectivityState; - if (tryToConnect) { - this.resolvingLoadBalancer.exitIdle(); - } - return connectivityState; + return this.internalChannel.getConnectivityState(tryToConnect); } watchConnectivityState( @@ -559,34 +132,20 @@ export class ChannelImplementation implements Channel { deadline: Date | number, callback: (error?: Error) => void ): void { - if (this.connectivityState === ConnectivityState.SHUTDOWN) { - throw new Error('Channel has been shut down'); - } - let timer = null; - if(deadline !== Infinity) { - const deadlineDate: Date = - deadline instanceof Date ? deadline : new Date(deadline); - const now = new Date(); - if (deadline === -Infinity || deadlineDate <= now) { - process.nextTick( - callback, - new Error('Deadline passed without connectivity state change') - ); - return; - } - timer = setTimeout(() => { - this.removeConnectivityStateWatcher(watcherObject); - callback( - new Error('Deadline passed without connectivity state change') - ); - }, deadlineDate.getTime() - now.getTime()) - } - const watcherObject = { + this.internalChannel.watchConnectivityState( currentState, - callback, - timer - }; - this.connectivityStateWatchers.push(watcherObject); + deadline, + callback + ); + } + + /** + * Get the channelz reference object for this channel. The returned value is + * garbage if channelz is disabled for this channel. + * @returns + */ + getChannelzRef() { + return this.internalChannel.getChannelzRef(); } createCall( @@ -604,35 +163,12 @@ export class ChannelImplementation implements Channel { 'Channel#createCall: deadline must be a number or Date' ); } - if (this.connectivityState === ConnectivityState.SHUTDOWN) { - throw new Error('Channel has been shut down'); - } - const callNumber = getNewCallNumber(); - trace( - LogVerbosity.DEBUG, - 'channel', - uriToString(this.target) + - ' createCall [' + - callNumber + - '] method="' + - method + - '", deadline=' + - deadline - ); - const finalOptions: CallStreamOptions = { - deadline: deadline, - flags: propagateFlags ?? Propagate.DEFAULTS, - host: host ?? 
this.defaultAuthority, - parentCall: parentCall, - }; - const stream: Http2CallStream = new Http2CallStream( + return this.internalChannel.createCall( method, - this, - finalOptions, - this.filterStackFactory, - this.credentials._getCallCredentials(), - callNumber + deadline, + host, + parentCall, + propagateFlags ); - return stream; } } diff --git a/packages/grpc-js/src/channelz.ts b/packages/grpc-js/src/channelz.ts new file mode 100644 index 000000000..c207e567c --- /dev/null +++ b/packages/grpc-js/src/channelz.ts @@ -0,0 +1,894 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import { isIPv4, isIPv6 } from 'net'; +import { OrderedMap, type OrderedMapIterator } from '@js-sdsl/ordered-map'; +import { ConnectivityState } from './connectivity-state'; +import { Status } from './constants'; +import { Timestamp } from './generated/google/protobuf/Timestamp'; +import { Channel as ChannelMessage } from './generated/grpc/channelz/v1/Channel'; +import { ChannelConnectivityState__Output } from './generated/grpc/channelz/v1/ChannelConnectivityState'; +import { ChannelRef as ChannelRefMessage } from './generated/grpc/channelz/v1/ChannelRef'; +import { ChannelTrace } from './generated/grpc/channelz/v1/ChannelTrace'; +import { GetChannelRequest__Output } from './generated/grpc/channelz/v1/GetChannelRequest'; +import { GetChannelResponse } from './generated/grpc/channelz/v1/GetChannelResponse'; +import { sendUnaryData, ServerUnaryCall } from './server-call'; +import { ServerRef as ServerRefMessage } from './generated/grpc/channelz/v1/ServerRef'; +import { SocketRef as SocketRefMessage } from './generated/grpc/channelz/v1/SocketRef'; +import { + isTcpSubchannelAddress, + SubchannelAddress, +} from './subchannel-address'; +import { SubchannelRef as SubchannelRefMessage } from './generated/grpc/channelz/v1/SubchannelRef'; +import { GetServerRequest__Output } from './generated/grpc/channelz/v1/GetServerRequest'; +import { GetServerResponse } from './generated/grpc/channelz/v1/GetServerResponse'; +import { Server as ServerMessage } from './generated/grpc/channelz/v1/Server'; +import { GetServersRequest__Output } from './generated/grpc/channelz/v1/GetServersRequest'; +import { GetServersResponse } from './generated/grpc/channelz/v1/GetServersResponse'; +import { GetTopChannelsRequest__Output } from './generated/grpc/channelz/v1/GetTopChannelsRequest'; +import { GetTopChannelsResponse } from './generated/grpc/channelz/v1/GetTopChannelsResponse'; +import { GetSubchannelRequest__Output } from './generated/grpc/channelz/v1/GetSubchannelRequest'; +import { GetSubchannelResponse } from './generated/grpc/channelz/v1/GetSubchannelResponse'; +import { Subchannel as SubchannelMessage } from './generated/grpc/channelz/v1/Subchannel'; +import { GetSocketRequest__Output } from './generated/grpc/channelz/v1/GetSocketRequest'; +import { GetSocketResponse } from './generated/grpc/channelz/v1/GetSocketResponse'; +import { Socket as SocketMessage } from 
'./generated/grpc/channelz/v1/Socket'; +import { Address } from './generated/grpc/channelz/v1/Address'; +import { Security } from './generated/grpc/channelz/v1/Security'; +import { GetServerSocketsRequest__Output } from './generated/grpc/channelz/v1/GetServerSocketsRequest'; +import { GetServerSocketsResponse } from './generated/grpc/channelz/v1/GetServerSocketsResponse'; +import { + ChannelzDefinition, + ChannelzHandlers, +} from './generated/grpc/channelz/v1/Channelz'; +import { ProtoGrpcType as ChannelzProtoGrpcType } from './generated/channelz'; +import type { loadSync } from '@grpc/proto-loader'; +import { registerAdminService } from './admin'; +import { loadPackageDefinition } from './make-client'; + +export type TraceSeverity = + | 'CT_UNKNOWN' + | 'CT_INFO' + | 'CT_WARNING' + | 'CT_ERROR'; + +interface Ref { + kind: EntityTypes; + id: number; + name: string; +} + +export interface ChannelRef extends Ref { + kind: EntityTypes.channel; +} + +export interface SubchannelRef extends Ref { + kind: EntityTypes.subchannel; +} + +export interface ServerRef extends Ref { + kind: EntityTypes.server; +} + +export interface SocketRef extends Ref { + kind: EntityTypes.socket; +} + +function channelRefToMessage(ref: ChannelRef): ChannelRefMessage { + return { + channel_id: ref.id, + name: ref.name, + }; +} + +function subchannelRefToMessage(ref: SubchannelRef): SubchannelRefMessage { + return { + subchannel_id: ref.id, + name: ref.name, + }; +} + +function serverRefToMessage(ref: ServerRef): ServerRefMessage { + return { + server_id: ref.id, + }; +} + +function socketRefToMessage(ref: SocketRef): SocketRefMessage { + return { + socket_id: ref.id, + name: ref.name, + }; +} + +interface TraceEvent { + description: string; + severity: TraceSeverity; + timestamp: Date; + childChannel?: ChannelRef; + childSubchannel?: SubchannelRef; +} + +/** + * The loose upper bound on the number of events that should be retained in a + * trace. This may be exceeded by up to a factor of 2. Arbitrarily chosen as a + * number that should be large enough to contain the recent relevant + * information, but small enough to not use excessive memory. + */ +const TARGET_RETAINED_TRACES = 32; + +/** + * Default number of sockets/servers/channels/subchannels to return + */ +const DEFAULT_MAX_RESULTS = 100; + +export class ChannelzTraceStub { + readonly events: TraceEvent[] = []; + readonly creationTimestamp: Date = new Date(); + readonly eventsLogged = 0; + + addTrace(): void {} + getTraceMessage(): ChannelTrace { + return { + creation_timestamp: dateToProtoTimestamp(this.creationTimestamp), + num_events_logged: this.eventsLogged, + events: [], + }; + } +} + +export class ChannelzTrace { + events: TraceEvent[] = []; + creationTimestamp: Date; + eventsLogged = 0; + + constructor() { + this.creationTimestamp = new Date(); + } + + addTrace( + severity: TraceSeverity, + description: string, + child?: ChannelRef | SubchannelRef + ) { + const timestamp = new Date(); + this.events.push({ + description: description, + severity: severity, + timestamp: timestamp, + childChannel: child?.kind === 'channel' ? child : undefined, + childSubchannel: child?.kind === 'subchannel' ? 
child : undefined, + }); + // Whenever the trace array gets too large, discard the first half + if (this.events.length >= TARGET_RETAINED_TRACES * 2) { + this.events = this.events.slice(TARGET_RETAINED_TRACES); + } + this.eventsLogged += 1; + } + + getTraceMessage(): ChannelTrace { + return { + creation_timestamp: dateToProtoTimestamp(this.creationTimestamp), + num_events_logged: this.eventsLogged, + events: this.events.map(event => { + return { + description: event.description, + severity: event.severity, + timestamp: dateToProtoTimestamp(event.timestamp), + channel_ref: event.childChannel + ? channelRefToMessage(event.childChannel) + : null, + subchannel_ref: event.childSubchannel + ? subchannelRefToMessage(event.childSubchannel) + : null, + }; + }), + }; + } +} + +type RefOrderedMap = OrderedMap< + number, + { ref: { id: number; kind: EntityTypes; name: string }; count: number } +>; + +export class ChannelzChildrenTracker { + private channelChildren: RefOrderedMap = new OrderedMap(); + private subchannelChildren: RefOrderedMap = new OrderedMap(); + private socketChildren: RefOrderedMap = new OrderedMap(); + private trackerMap = { + [EntityTypes.channel]: this.channelChildren, + [EntityTypes.subchannel]: this.subchannelChildren, + [EntityTypes.socket]: this.socketChildren, + } as const; + + refChild(child: ChannelRef | SubchannelRef | SocketRef) { + const tracker = this.trackerMap[child.kind]; + const trackedChild = tracker.find(child.id); + + if (trackedChild.equals(tracker.end())) { + tracker.setElement( + child.id, + { + ref: child, + count: 1, + }, + trackedChild + ); + } else { + trackedChild.pointer[1].count += 1; + } + } + + unrefChild(child: ChannelRef | SubchannelRef | SocketRef) { + const tracker = this.trackerMap[child.kind]; + const trackedChild = tracker.getElementByKey(child.id); + if (trackedChild !== undefined) { + trackedChild.count -= 1; + if (trackedChild.count === 0) { + tracker.eraseElementByKey(child.id); + } + } + } + + getChildLists(): ChannelzChildren { + return { + channels: this.channelChildren as ChannelzChildren['channels'], + subchannels: this.subchannelChildren as ChannelzChildren['subchannels'], + sockets: this.socketChildren as ChannelzChildren['sockets'], + }; + } +} + +export class ChannelzChildrenTrackerStub extends ChannelzChildrenTracker { + override refChild(): void {} + override unrefChild(): void {} +} + +export class ChannelzCallTracker { + callsStarted = 0; + callsSucceeded = 0; + callsFailed = 0; + lastCallStartedTimestamp: Date | null = null; + + addCallStarted() { + this.callsStarted += 1; + this.lastCallStartedTimestamp = new Date(); + } + addCallSucceeded() { + this.callsSucceeded += 1; + } + addCallFailed() { + this.callsFailed += 1; + } +} + +export class ChannelzCallTrackerStub extends ChannelzCallTracker { + override addCallStarted() {} + override addCallSucceeded() {} + override addCallFailed() {} +} + +export interface ChannelzChildren { + channels: OrderedMap; + subchannels: OrderedMap; + sockets: OrderedMap; +} + +export interface ChannelInfo { + target: string; + state: ConnectivityState; + trace: ChannelzTrace | ChannelzTraceStub; + callTracker: ChannelzCallTracker | ChannelzCallTrackerStub; + children: ChannelzChildren; +} + +export type SubchannelInfo = ChannelInfo; + +export interface ServerInfo { + trace: ChannelzTrace; + callTracker: ChannelzCallTracker; + listenerChildren: ChannelzChildren; + sessionChildren: ChannelzChildren; +} + +export interface TlsInfo { + cipherSuiteStandardName: string | null; + cipherSuiteOtherName: 
string | null; + localCertificate: Buffer | null; + remoteCertificate: Buffer | null; +} + +export interface SocketInfo { + localAddress: SubchannelAddress | null; + remoteAddress: SubchannelAddress | null; + security: TlsInfo | null; + remoteName: string | null; + streamsStarted: number; + streamsSucceeded: number; + streamsFailed: number; + messagesSent: number; + messagesReceived: number; + keepAlivesSent: number; + lastLocalStreamCreatedTimestamp: Date | null; + lastRemoteStreamCreatedTimestamp: Date | null; + lastMessageSentTimestamp: Date | null; + lastMessageReceivedTimestamp: Date | null; + localFlowControlWindow: number | null; + remoteFlowControlWindow: number | null; +} + +interface ChannelEntry { + ref: ChannelRef; + getInfo(): ChannelInfo; +} + +interface SubchannelEntry { + ref: SubchannelRef; + getInfo(): SubchannelInfo; +} + +interface ServerEntry { + ref: ServerRef; + getInfo(): ServerInfo; +} + +interface SocketEntry { + ref: SocketRef; + getInfo(): SocketInfo; +} + +export const enum EntityTypes { + channel = 'channel', + subchannel = 'subchannel', + server = 'server', + socket = 'socket', +} + +type EntryOrderedMap = OrderedMap any }>; + +const entityMaps = { + [EntityTypes.channel]: new OrderedMap(), + [EntityTypes.subchannel]: new OrderedMap(), + [EntityTypes.server]: new OrderedMap(), + [EntityTypes.socket]: new OrderedMap(), +} as const; + +export type RefByType = T extends EntityTypes.channel + ? ChannelRef + : T extends EntityTypes.server + ? ServerRef + : T extends EntityTypes.socket + ? SocketRef + : T extends EntityTypes.subchannel + ? SubchannelRef + : never; + +export type EntryByType = T extends EntityTypes.channel + ? ChannelEntry + : T extends EntityTypes.server + ? ServerEntry + : T extends EntityTypes.socket + ? SocketEntry + : T extends EntityTypes.subchannel + ? SubchannelEntry + : never; + +export type InfoByType = T extends EntityTypes.channel + ? ChannelInfo + : T extends EntityTypes.subchannel + ? SubchannelInfo + : T extends EntityTypes.server + ? ServerInfo + : T extends EntityTypes.socket + ? 
SocketInfo + : never; + +const generateRegisterFn = (kind: R) => { + let nextId = 1; + function getNextId(): number { + return nextId++; + } + + const entityMap: EntryOrderedMap = entityMaps[kind]; + + return ( + name: string, + getInfo: () => InfoByType, + channelzEnabled: boolean + ): RefByType => { + const id = getNextId(); + const ref = { id, name, kind } as RefByType; + if (channelzEnabled) { + entityMap.setElement(id, { ref, getInfo }); + } + return ref; + }; +}; + +export const registerChannelzChannel = generateRegisterFn(EntityTypes.channel); +export const registerChannelzSubchannel = generateRegisterFn( + EntityTypes.subchannel +); +export const registerChannelzServer = generateRegisterFn(EntityTypes.server); +export const registerChannelzSocket = generateRegisterFn(EntityTypes.socket); + +export function unregisterChannelzRef( + ref: ChannelRef | SubchannelRef | ServerRef | SocketRef +) { + entityMaps[ref.kind].eraseElementByKey(ref.id); +} + +/** + * Parse a single section of an IPv6 address as two bytes + * @param addressSection A hexadecimal string of length up to 4 + * @returns The pair of bytes representing this address section + */ +function parseIPv6Section(addressSection: string): [number, number] { + const numberValue = Number.parseInt(addressSection, 16); + return [(numberValue / 256) | 0, numberValue % 256]; +} + +/** + * Parse a chunk of an IPv6 address string to some number of bytes + * @param addressChunk Some number of segments of up to 4 hexadecimal + * characters each, joined by colons. + * @returns The list of bytes representing this address chunk + */ +function parseIPv6Chunk(addressChunk: string): number[] { + if (addressChunk === '') { + return []; + } + const bytePairs = addressChunk + .split(':') + .map(section => parseIPv6Section(section)); + const result: number[] = []; + return result.concat(...bytePairs); +} + +/** + * Converts an IPv4 or IPv6 address from string representation to binary + * representation + * @param ipAddress an IP address in standard IPv4 or IPv6 text format + * @returns + */ +function ipAddressStringToBuffer(ipAddress: string): Buffer | null { + if (isIPv4(ipAddress)) { + return Buffer.from( + Uint8Array.from( + ipAddress.split('.').map(segment => Number.parseInt(segment)) + ) + ); + } else if (isIPv6(ipAddress)) { + let leftSection: string; + let rightSection: string; + const doubleColonIndex = ipAddress.indexOf('::'); + if (doubleColonIndex === -1) { + leftSection = ipAddress; + rightSection = ''; + } else { + leftSection = ipAddress.substring(0, doubleColonIndex); + rightSection = ipAddress.substring(doubleColonIndex + 2); + } + const leftBuffer = Buffer.from(parseIPv6Chunk(leftSection)); + const rightBuffer = Buffer.from(parseIPv6Chunk(rightSection)); + const middleBuffer = Buffer.alloc( + 16 - leftBuffer.length - rightBuffer.length, + 0 + ); + return Buffer.concat([leftBuffer, middleBuffer, rightBuffer]); + } else { + return null; + } +} + +function connectivityStateToMessage( + state: ConnectivityState +): ChannelConnectivityState__Output { + switch (state) { + case ConnectivityState.CONNECTING: + return { + state: 'CONNECTING', + }; + case ConnectivityState.IDLE: + return { + state: 'IDLE', + }; + case ConnectivityState.READY: + return { + state: 'READY', + }; + case ConnectivityState.SHUTDOWN: + return { + state: 'SHUTDOWN', + }; + case ConnectivityState.TRANSIENT_FAILURE: + return { + state: 'TRANSIENT_FAILURE', + }; + default: + return { + state: 'UNKNOWN', + }; + } +} + +function dateToProtoTimestamp(date?: Date | null): 
Timestamp | null { + if (!date) { + return null; + } + const millisSinceEpoch = date.getTime(); + return { + seconds: (millisSinceEpoch / 1000) | 0, + nanos: (millisSinceEpoch % 1000) * 1_000_000, + }; +} + +function getChannelMessage(channelEntry: ChannelEntry): ChannelMessage { + const resolvedInfo = channelEntry.getInfo(); + const channelRef: ChannelRefMessage[] = []; + const subchannelRef: SubchannelRefMessage[] = []; + + resolvedInfo.children.channels.forEach(el => { + channelRef.push(channelRefToMessage(el[1].ref)); + }); + + resolvedInfo.children.subchannels.forEach(el => { + subchannelRef.push(subchannelRefToMessage(el[1].ref)); + }); + + return { + ref: channelRefToMessage(channelEntry.ref), + data: { + target: resolvedInfo.target, + state: connectivityStateToMessage(resolvedInfo.state), + calls_started: resolvedInfo.callTracker.callsStarted, + calls_succeeded: resolvedInfo.callTracker.callsSucceeded, + calls_failed: resolvedInfo.callTracker.callsFailed, + last_call_started_timestamp: dateToProtoTimestamp( + resolvedInfo.callTracker.lastCallStartedTimestamp + ), + trace: resolvedInfo.trace.getTraceMessage(), + }, + channel_ref: channelRef, + subchannel_ref: subchannelRef, + }; +} + +function GetChannel( + call: ServerUnaryCall, + callback: sendUnaryData +): void { + const channelId = parseInt(call.request.channel_id, 10); + const channelEntry = + entityMaps[EntityTypes.channel].getElementByKey(channelId); + if (channelEntry === undefined) { + callback({ + code: Status.NOT_FOUND, + details: 'No channel data found for id ' + channelId, + }); + return; + } + callback(null, { channel: getChannelMessage(channelEntry) }); +} + +function GetTopChannels( + call: ServerUnaryCall, + callback: sendUnaryData +): void { + const maxResults = + parseInt(call.request.max_results, 10) || DEFAULT_MAX_RESULTS; + const resultList: ChannelMessage[] = []; + const startId = parseInt(call.request.start_channel_id, 10); + const channelEntries = entityMaps[EntityTypes.channel]; + + let i: OrderedMapIterator; + for ( + i = channelEntries.lowerBound(startId); + !i.equals(channelEntries.end()) && resultList.length < maxResults; + i = i.next() + ) { + resultList.push(getChannelMessage(i.pointer[1])); + } + + callback(null, { + channel: resultList, + end: i.equals(channelEntries.end()), + }); +} + +function getServerMessage(serverEntry: ServerEntry): ServerMessage { + const resolvedInfo = serverEntry.getInfo(); + const listenSocket: SocketRefMessage[] = []; + + resolvedInfo.listenerChildren.sockets.forEach(el => { + listenSocket.push(socketRefToMessage(el[1].ref)); + }); + + return { + ref: serverRefToMessage(serverEntry.ref), + data: { + calls_started: resolvedInfo.callTracker.callsStarted, + calls_succeeded: resolvedInfo.callTracker.callsSucceeded, + calls_failed: resolvedInfo.callTracker.callsFailed, + last_call_started_timestamp: dateToProtoTimestamp( + resolvedInfo.callTracker.lastCallStartedTimestamp + ), + trace: resolvedInfo.trace.getTraceMessage(), + }, + listen_socket: listenSocket, + }; +} + +function GetServer( + call: ServerUnaryCall, + callback: sendUnaryData +): void { + const serverId = parseInt(call.request.server_id, 10); + const serverEntries = entityMaps[EntityTypes.server]; + const serverEntry = serverEntries.getElementByKey(serverId); + if (serverEntry === undefined) { + callback({ + code: Status.NOT_FOUND, + details: 'No server data found for id ' + serverId, + }); + return; + } + callback(null, { server: getServerMessage(serverEntry) }); +} + +function GetServers( + call: 
ServerUnaryCall, + callback: sendUnaryData +): void { + const maxResults = + parseInt(call.request.max_results, 10) || DEFAULT_MAX_RESULTS; + const startId = parseInt(call.request.start_server_id, 10); + const serverEntries = entityMaps[EntityTypes.server]; + const resultList: ServerMessage[] = []; + + let i: OrderedMapIterator; + for ( + i = serverEntries.lowerBound(startId); + !i.equals(serverEntries.end()) && resultList.length < maxResults; + i = i.next() + ) { + resultList.push(getServerMessage(i.pointer[1])); + } + + callback(null, { + server: resultList, + end: i.equals(serverEntries.end()), + }); +} + +function GetSubchannel( + call: ServerUnaryCall, + callback: sendUnaryData +): void { + const subchannelId = parseInt(call.request.subchannel_id, 10); + const subchannelEntry = + entityMaps[EntityTypes.subchannel].getElementByKey(subchannelId); + if (subchannelEntry === undefined) { + callback({ + code: Status.NOT_FOUND, + details: 'No subchannel data found for id ' + subchannelId, + }); + return; + } + const resolvedInfo = subchannelEntry.getInfo(); + const listenSocket: SocketRefMessage[] = []; + + resolvedInfo.children.sockets.forEach(el => { + listenSocket.push(socketRefToMessage(el[1].ref)); + }); + + const subchannelMessage: SubchannelMessage = { + ref: subchannelRefToMessage(subchannelEntry.ref), + data: { + target: resolvedInfo.target, + state: connectivityStateToMessage(resolvedInfo.state), + calls_started: resolvedInfo.callTracker.callsStarted, + calls_succeeded: resolvedInfo.callTracker.callsSucceeded, + calls_failed: resolvedInfo.callTracker.callsFailed, + last_call_started_timestamp: dateToProtoTimestamp( + resolvedInfo.callTracker.lastCallStartedTimestamp + ), + trace: resolvedInfo.trace.getTraceMessage(), + }, + socket_ref: listenSocket, + }; + callback(null, { subchannel: subchannelMessage }); +} + +function subchannelAddressToAddressMessage( + subchannelAddress: SubchannelAddress +): Address { + if (isTcpSubchannelAddress(subchannelAddress)) { + return { + address: 'tcpip_address', + tcpip_address: { + ip_address: + ipAddressStringToBuffer(subchannelAddress.host) ?? undefined, + port: subchannelAddress.port, + }, + }; + } else { + return { + address: 'uds_address', + uds_address: { + filename: subchannelAddress.path, + }, + }; + } +} + +function GetSocket( + call: ServerUnaryCall, + callback: sendUnaryData +): void { + const socketId = parseInt(call.request.socket_id, 10); + const socketEntry = entityMaps[EntityTypes.socket].getElementByKey(socketId); + if (socketEntry === undefined) { + callback({ + code: Status.NOT_FOUND, + details: 'No socket data found for id ' + socketId, + }); + return; + } + const resolvedInfo = socketEntry.getInfo(); + const securityMessage: Security | null = resolvedInfo.security + ? { + model: 'tls', + tls: { + cipher_suite: resolvedInfo.security.cipherSuiteStandardName + ? 'standard_name' + : 'other_name', + standard_name: + resolvedInfo.security.cipherSuiteStandardName ?? undefined, + other_name: resolvedInfo.security.cipherSuiteOtherName ?? undefined, + local_certificate: + resolvedInfo.security.localCertificate ?? undefined, + remote_certificate: + resolvedInfo.security.remoteCertificate ?? undefined, + }, + } + : null; + const socketMessage: SocketMessage = { + ref: socketRefToMessage(socketEntry.ref), + local: resolvedInfo.localAddress + ? subchannelAddressToAddressMessage(resolvedInfo.localAddress) + : null, + remote: resolvedInfo.remoteAddress + ? 
subchannelAddressToAddressMessage(resolvedInfo.remoteAddress) + : null, + remote_name: resolvedInfo.remoteName ?? undefined, + security: securityMessage, + data: { + keep_alives_sent: resolvedInfo.keepAlivesSent, + streams_started: resolvedInfo.streamsStarted, + streams_succeeded: resolvedInfo.streamsSucceeded, + streams_failed: resolvedInfo.streamsFailed, + last_local_stream_created_timestamp: dateToProtoTimestamp( + resolvedInfo.lastLocalStreamCreatedTimestamp + ), + last_remote_stream_created_timestamp: dateToProtoTimestamp( + resolvedInfo.lastRemoteStreamCreatedTimestamp + ), + messages_received: resolvedInfo.messagesReceived, + messages_sent: resolvedInfo.messagesSent, + last_message_received_timestamp: dateToProtoTimestamp( + resolvedInfo.lastMessageReceivedTimestamp + ), + last_message_sent_timestamp: dateToProtoTimestamp( + resolvedInfo.lastMessageSentTimestamp + ), + local_flow_control_window: resolvedInfo.localFlowControlWindow + ? { value: resolvedInfo.localFlowControlWindow } + : null, + remote_flow_control_window: resolvedInfo.remoteFlowControlWindow + ? { value: resolvedInfo.remoteFlowControlWindow } + : null, + }, + }; + callback(null, { socket: socketMessage }); +} + +function GetServerSockets( + call: ServerUnaryCall< + GetServerSocketsRequest__Output, + GetServerSocketsResponse + >, + callback: sendUnaryData +): void { + const serverId = parseInt(call.request.server_id, 10); + const serverEntry = entityMaps[EntityTypes.server].getElementByKey(serverId); + + if (serverEntry === undefined) { + callback({ + code: Status.NOT_FOUND, + details: 'No server data found for id ' + serverId, + }); + return; + } + + const startId = parseInt(call.request.start_socket_id, 10); + const maxResults = + parseInt(call.request.max_results, 10) || DEFAULT_MAX_RESULTS; + const resolvedInfo = serverEntry.getInfo(); + // If we wanted to include listener sockets in the result, this line would + // instead say + // const allSockets = resolvedInfo.listenerChildren.sockets.concat(resolvedInfo.sessionChildren.sockets).sort((ref1, ref2) => ref1.id - ref2.id); + const allSockets = resolvedInfo.sessionChildren.sockets; + const resultList: SocketRefMessage[] = []; + + let i: OrderedMapIterator; + for ( + i = allSockets.lowerBound(startId); + !i.equals(allSockets.end()) && resultList.length < maxResults; + i = i.next() + ) { + resultList.push(socketRefToMessage(i.pointer[1].ref)); + } + + callback(null, { + socket_ref: resultList, + end: i.equals(allSockets.end()), + }); +} + +export function getChannelzHandlers(): ChannelzHandlers { + return { + GetChannel, + GetTopChannels, + GetServer, + GetServers, + GetSubchannel, + GetSocket, + GetServerSockets, + }; +} + +let loadedChannelzDefinition: ChannelzDefinition | null = null; + +export function getChannelzServiceDefinition(): ChannelzDefinition { + if (loadedChannelzDefinition) { + return loadedChannelzDefinition; + } + /* The purpose of this complexity is to avoid loading @grpc/proto-loader at + * runtime for users who will not use/enable channelz. 
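getChannelzServiceDefinition and getChannelzHandlers above are the two pieces a server needs in order to answer channelz queries about the channels, subchannels, servers, and sockets registered in this file. A minimal wiring sketch follows; it assumes both functions are re-exported from the package entry point (that re-export is not shown in this hunk), and the bind address is a placeholder.

import * as grpc from '@grpc/grpc-js';

// Assumption: the package root re-exports getChannelzServiceDefinition and
// getChannelzHandlers from the module above. '0.0.0.0:50052' is a placeholder.
const server = new grpc.Server({ 'grpc.enable_channelz': 1 });
server.addService(grpc.getChannelzServiceDefinition(), grpc.getChannelzHandlers());
server.bindAsync(
  '0.0.0.0:50052',
  grpc.ServerCredentials.createInsecure(),
  (error, port) => {
    if (error) {
      throw error;
    }
    server.start();
    console.log('channelz service listening on port ' + port);
  }
);

Any channelz-capable client (for example a stub generated from channelz.proto) can then call GetTopChannels, GetServers, or GetSocket against that address.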
*/ + const loaderLoadSync = require('@grpc/proto-loader') + .loadSync as typeof loadSync; + const loadedProto = loaderLoadSync('channelz.proto', { + keepCase: true, + longs: String, + enums: String, + defaults: true, + oneofs: true, + includeDirs: [`${__dirname}/../../proto`], + }); + const channelzGrpcObject = loadPackageDefinition( + loadedProto + ) as unknown as ChannelzProtoGrpcType; + loadedChannelzDefinition = + channelzGrpcObject.grpc.channelz.v1.Channelz.service; + return loadedChannelzDefinition; +} + +export function setup() { + registerAdminService(getChannelzServiceDefinition, getChannelzHandlers); +} diff --git a/packages/grpc-js/src/client-interceptors.ts b/packages/grpc-js/src/client-interceptors.ts index a7cc2f878..4f53d1775 100644 --- a/packages/grpc-js/src/client-interceptors.ts +++ b/packages/grpc-js/src/client-interceptors.ts @@ -28,12 +28,12 @@ import { isInterceptingListener, MessageContext, Call, -} from './call-stream'; +} from './call-interface'; import { Status } from './constants'; import { Channel } from './channel'; import { CallOptions } from './client'; -import { CallCredentials } from './call-credentials'; import { ClientMethodDefinition } from './make-client'; +import { getErrorMessage } from './error'; /** * Error class associated with passing both interceptors and interceptor @@ -175,10 +175,10 @@ const defaultRequester: FullRequester = { sendMessage: (message, next) => { next(message); }, - halfClose: (next) => { + halfClose: next => { next(); }, - cancel: (next) => { + cancel: next => { next(); }, }; @@ -198,8 +198,6 @@ export interface InterceptingCallInterface { sendMessage(message: any): void; startRead(): void; halfClose(): void; - - setCredentials(credentials: CallCredentials): void; } export class InterceptingCall implements InterceptingCallInterface { @@ -208,8 +206,18 @@ export class InterceptingCall implements InterceptingCallInterface { */ private requester: FullRequester; /** - * Indicates that a message has been passed to the listener's onReceiveMessage - * method it has not been passed to the corresponding next callback + * Indicates that metadata has been passed to the requester's start + * method but it has not been passed to the corresponding next callback + */ + private processingMetadata = false; + /** + * Message context for a pending message that is waiting for + */ + private pendingMessageContext: MessageContext | null = null; + private pendingMessage: any; + /** + * Indicates that a message has been passed to the requester's sendMessage + * method but it has not been passed to the corresponding next callback */ private processingMessage = false; /** @@ -242,6 +250,24 @@ export class InterceptingCall implements InterceptingCallInterface { getPeer() { return this.nextCall.getPeer(); } + + private processPendingMessage() { + if (this.pendingMessageContext) { + this.nextCall.sendMessageWithContext( + this.pendingMessageContext, + this.pendingMessage + ); + this.pendingMessageContext = null; + this.pendingMessage = null; + } + } + + private processPendingHalfClose() { + if (this.pendingHalfClose) { + this.nextCall.halfClose(); + } + } + start( metadata: Metadata, interceptingListener?: Partial @@ -249,15 +275,17 @@ export class InterceptingCall implements InterceptingCallInterface { const fullInterceptingListener: InterceptingListener = { onReceiveMetadata: interceptingListener?.onReceiveMetadata?.bind(interceptingListener) ?? 
- ((metadata) => {}), + (metadata => {}), onReceiveMessage: interceptingListener?.onReceiveMessage?.bind(interceptingListener) ?? - ((message) => {}), + (message => {}), onReceiveStatus: interceptingListener?.onReceiveStatus?.bind(interceptingListener) ?? - ((status) => {}), + (status => {}), }; + this.processingMetadata = true; this.requester.start(metadata, fullInterceptingListener, (md, listener) => { + this.processingMetadata = false; let finalInterceptingListener: InterceptingListener; if (isInterceptingListener(listener)) { finalInterceptingListener = listener; @@ -276,16 +304,21 @@ export class InterceptingCall implements InterceptingCallInterface { ); } this.nextCall.start(md, finalInterceptingListener); + this.processPendingMessage(); + this.processPendingHalfClose(); }); } // eslint-disable-next-line @typescript-eslint/no-explicit-any sendMessageWithContext(context: MessageContext, message: any): void { this.processingMessage = true; - this.requester.sendMessage(message, (finalMessage) => { + this.requester.sendMessage(message, finalMessage => { this.processingMessage = false; - this.nextCall.sendMessageWithContext(context, finalMessage); - if (this.pendingHalfClose) { - this.nextCall.halfClose(); + if (this.processingMetadata) { + this.pendingMessageContext = context; + this.pendingMessage = message; + } else { + this.nextCall.sendMessageWithContext(context, finalMessage); + this.processPendingHalfClose(); } }); } @@ -298,16 +331,13 @@ export class InterceptingCall implements InterceptingCallInterface { } halfClose(): void { this.requester.halfClose(() => { - if (this.processingMessage) { + if (this.processingMetadata || this.processingMessage) { this.pendingHalfClose = true; } else { this.nextCall.halfClose(); } }); } - setCredentials(credentials: CallCredentials): void { - this.nextCall.setCredentials(credentials); - } } function getCall(channel: Channel, path: string, options: CallOptions): Call { @@ -339,16 +369,16 @@ class BaseInterceptingCall implements InterceptingCallInterface { getPeer(): string { return this.call.getPeer(); } - setCredentials(credentials: CallCredentials): void { - this.call.setCredentials(credentials); - } // eslint-disable-next-line @typescript-eslint/no-explicit-any sendMessageWithContext(context: MessageContext, message: any): void { let serialized: Buffer; try { serialized = this.methodDefinition.requestSerialize(message); } catch (e) { - this.call.cancelWithStatus(Status.INTERNAL, `Request message serialization failure: ${e.message}`); + this.call.cancelWithStatus( + Status.INTERNAL, + `Request message serialization failure: ${getErrorMessage(e)}` + ); return; } this.call.sendMessageWithContext(context, serialized); @@ -363,10 +393,10 @@ class BaseInterceptingCall implements InterceptingCallInterface { ): void { let readError: StatusObject | null = null; this.call.start(metadata, { - onReceiveMetadata: (metadata) => { + onReceiveMetadata: metadata => { interceptingListener?.onReceiveMetadata?.(metadata); }, - onReceiveMessage: (message) => { + onReceiveMessage: message => { // eslint-disable-next-line @typescript-eslint/no-explicit-any let deserialized: any; try { @@ -374,7 +404,7 @@ class BaseInterceptingCall implements InterceptingCallInterface { } catch (e) { readError = { code: Status.INTERNAL, - details: `Response message parsing error: ${e.message}`, + details: `Response message parsing error: ${getErrorMessage(e)}`, metadata: new Metadata(), }; this.call.cancelWithStatus(readError.code, readError.details); @@ -382,7 +412,7 @@ class 
BaseInterceptingCall implements InterceptingCallInterface { } interceptingListener?.onReceiveMessage?.(deserialized); }, - onReceiveStatus: (status) => { + onReceiveStatus: status => { if (readError) { interceptingListener?.onReceiveStatus?.(readError); } else { @@ -403,8 +433,10 @@ class BaseInterceptingCall implements InterceptingCallInterface { * BaseInterceptingCall with special-cased behavior for methods with unary * responses. */ -class BaseUnaryInterceptingCall extends BaseInterceptingCall - implements InterceptingCallInterface { +class BaseUnaryInterceptingCall + extends BaseInterceptingCall + implements InterceptingCallInterface +{ // eslint-disable-next-line @typescript-eslint/no-explicit-any constructor(call: Call, methodDefinition: ClientMethodDefinition) { super(call, methodDefinition); @@ -413,7 +445,7 @@ class BaseUnaryInterceptingCall extends BaseInterceptingCall let receivedMessage = false; const wrapperListener: InterceptingListener = { onReceiveMetadata: - listener?.onReceiveMetadata?.bind(listener) ?? ((metadata) => {}), + listener?.onReceiveMetadata?.bind(listener) ?? (metadata => {}), // eslint-disable-next-line @typescript-eslint/no-explicit-any onReceiveMessage: (message: any) => { receivedMessage = true; @@ -435,7 +467,8 @@ class BaseUnaryInterceptingCall extends BaseInterceptingCall * BaseInterceptingCall with special-cased behavior for methods with streaming * responses. */ -class BaseStreamingInterceptingCall extends BaseInterceptingCall +class BaseStreamingInterceptingCall + extends BaseInterceptingCall implements InterceptingCallInterface {} function getBottomInterceptingCall( @@ -506,21 +539,21 @@ export function getInterceptingCall( interceptors = ([] as Interceptor[]) .concat( interceptorArgs.callInterceptors, - interceptorArgs.callInterceptorProviders.map((provider) => + interceptorArgs.callInterceptorProviders.map(provider => provider(methodDefinition) ) ) - .filter((interceptor) => interceptor); + .filter(interceptor => interceptor); // Filter out falsy values when providers return nothing } else { interceptors = ([] as Interceptor[]) .concat( interceptorArgs.clientInterceptors, - interceptorArgs.clientInterceptorProviders.map((provider) => + interceptorArgs.clientInterceptorProviders.map(provider => provider(methodDefinition) ) ) - .filter((interceptor) => interceptor); + .filter(interceptor => interceptor); // Filter out falsy values when providers return nothing } const interceptorOptions = Object.assign({}, options, { @@ -535,7 +568,7 @@ export function getInterceptingCall( * channel. 
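The reduceRight chain described in the comment above composes interceptors from the outside in, ending at the call created on the channel. For context, here is a minimal interceptor sketch using only the Requester hooks defined in this file; the header name is invented. With the processingMetadata change above, a message sent before the start callback completes is now buffered instead of racing ahead of the metadata.

import { InterceptingCall, Interceptor } from '@grpc/grpc-js';

// Minimal logging interceptor; 'x-example-trace' is an invented header name.
const loggingInterceptor: Interceptor = (options, nextCall) => {
  return new InterceptingCall(nextCall(options), {
    start(metadata, listener, next) {
      metadata.set('x-example-trace', 'on');
      next(metadata, listener);
    },
    sendMessage(message, next) {
      // With the change above, InterceptingCall holds this message until its
      // start callback has completed, then flushes it.
      console.log('sending message for ' + options.method_definition.path);
      next(message);
    },
  });
};

// Attached per client via the existing interceptors client option, e.g.
// new SomeClient(target, credentials, { interceptors: [loggingInterceptor] }).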
*/ const getCall: NextCall = interceptors.reduceRight( (nextCall: NextCall, nextInterceptor: Interceptor) => { - return (currentOptions) => nextInterceptor(currentOptions, nextCall); + return currentOptions => nextInterceptor(currentOptions, nextCall); }, (finalOptions: InterceptorOptions) => getBottomInterceptingCall(channel, finalOptions, methodDefinition) diff --git a/packages/grpc-js/src/client.ts b/packages/grpc-js/src/client.ts index 204be9017..995d5b328 100644 --- a/packages/grpc-js/src/client.ts +++ b/packages/grpc-js/src/client.ts @@ -29,8 +29,9 @@ import { SurfaceCall, } from './call'; import { CallCredentials } from './call-credentials'; -import { Deadline, StatusObject } from './call-stream'; -import { Channel, ConnectivityState, ChannelImplementation } from './channel'; +import { StatusObject } from './call-interface'; +import { Channel, ChannelImplementation } from './channel'; +import { ConnectivityState } from './connectivity-state'; import { ChannelCredentials } from './channel-credentials'; import { ChannelOptions } from './channel-options'; import { Status } from './constants'; @@ -49,13 +50,16 @@ import { ServerWritableStream, ServerDuplexStream, } from './server-call'; +import { Deadline } from './deadline'; const CHANNEL_SYMBOL = Symbol(); const INTERCEPTOR_SYMBOL = Symbol(); const INTERCEPTOR_PROVIDER_SYMBOL = Symbol(); const CALL_INVOCATION_TRANSFORMER_SYMBOL = Symbol(); -function isFunction(arg: Metadata | CallOptions | UnaryCallback | undefined): arg is UnaryCallback{ +function isFunction( + arg: Metadata | CallOptions | UnaryCallback | undefined +): arg is UnaryCallback { return typeof arg === 'function'; } @@ -105,6 +109,10 @@ export type ClientOptions = Partial & { callInvocationTransformer?: CallInvocationTransformer; }; +function getErrorStackString(error: Error): string { + return error.stack?.split('\n').slice(1).join('\n') || 'no stack trace available'; +} + /** * A generic gRPC client. Primarily useful as a base class for all generated * clients. @@ -265,19 +273,20 @@ export class Client { options?: CallOptions | UnaryCallback, callback?: UnaryCallback ): ClientUnaryCall { - const checkedArguments = this.checkOptionalUnaryResponseArguments< - ResponseType - >(metadata, options, callback); - const methodDefinition: ClientMethodDefinition< - RequestType, - ResponseType - > = { - path: method, - requestStream: false, - responseStream: false, - requestSerialize: serialize, - responseDeserialize: deserialize, - }; + const checkedArguments = + this.checkOptionalUnaryResponseArguments( + metadata, + options, + callback + ); + const methodDefinition: ClientMethodDefinition = + { + path: method, + requestStream: false, + responseStream: false, + requestSerialize: serialize, + responseDeserialize: deserialize, + }; let callProperties: CallProperties = { argument: argument, metadata: checkedArguments.metadata, @@ -311,13 +320,11 @@ export class Client { * before calling the CallInvocationTransformer, and we need to create the * call after that. 
*/ emitter.call = call; - if (callProperties.callOptions.credentials) { - call.setCredentials(callProperties.callOptions.credentials); - } let responseMessage: ResponseType | null = null; let receivedStatus = false; + let callerStackError: Error | null = new Error(); call.start(callProperties.metadata, { - onReceiveMetadata: (metadata) => { + onReceiveMetadata: metadata => { emitter.emit('metadata', metadata); }, // eslint-disable-next-line @typescript-eslint/no-explicit-any @@ -333,10 +340,28 @@ export class Client { } receivedStatus = true; if (status.code === Status.OK) { - callProperties.callback!(null, responseMessage!); + if (responseMessage === null) { + const callerStack = getErrorStackString(callerStackError!); + callProperties.callback!( + callErrorFromStatus( + { + code: Status.INTERNAL, + details: 'No message received', + metadata: status.metadata, + }, + callerStack + ) + ); + } else { + callProperties.callback!(null, responseMessage); + } } else { - callProperties.callback!(callErrorFromStatus(status)); + const callerStack = getErrorStackString(callerStackError!); + callProperties.callback!(callErrorFromStatus(status, callerStack)); } + /* Avoid retaining the callerStackError object in the call context of + * the status event handler. */ + callerStackError = null; emitter.emit('status', status); }, }); @@ -381,19 +406,20 @@ export class Client { options?: CallOptions | UnaryCallback, callback?: UnaryCallback ): ClientWritableStream { - const checkedArguments = this.checkOptionalUnaryResponseArguments< - ResponseType - >(metadata, options, callback); - const methodDefinition: ClientMethodDefinition< - RequestType, - ResponseType - > = { - path: method, - requestStream: true, - responseStream: false, - requestSerialize: serialize, - responseDeserialize: deserialize, - }; + const checkedArguments = + this.checkOptionalUnaryResponseArguments( + metadata, + options, + callback + ); + const methodDefinition: ClientMethodDefinition = + { + path: method, + requestStream: true, + responseStream: false, + requestSerialize: serialize, + responseDeserialize: deserialize, + }; let callProperties: CallProperties = { metadata: checkedArguments.metadata, call: new ClientWritableStreamImpl(serialize), @@ -407,9 +433,8 @@ export class Client { callProperties ) as CallProperties; } - const emitter: ClientWritableStream = callProperties.call as ClientWritableStream< - RequestType - >; + const emitter: ClientWritableStream = + callProperties.call as ClientWritableStream; const interceptorArgs: InterceptorArguments = { clientInterceptors: this[INTERCEPTOR_SYMBOL], clientInterceptorProviders: this[INTERCEPTOR_PROVIDER_SYMBOL], @@ -428,13 +453,11 @@ export class Client { * before calling the CallInvocationTransformer, and we need to create the * call after that. 
*/ emitter.call = call; - if (callProperties.callOptions.credentials) { - call.setCredentials(callProperties.callOptions.credentials); - } let responseMessage: ResponseType | null = null; let receivedStatus = false; + let callerStackError: Error | null = new Error(); call.start(callProperties.metadata, { - onReceiveMetadata: (metadata) => { + onReceiveMetadata: metadata => { emitter.emit('metadata', metadata); }, // eslint-disable-next-line @typescript-eslint/no-explicit-any @@ -450,10 +473,28 @@ export class Client { } receivedStatus = true; if (status.code === Status.OK) { - callProperties.callback!(null, responseMessage!); + if (responseMessage === null) { + const callerStack = getErrorStackString(callerStackError!); + callProperties.callback!( + callErrorFromStatus( + { + code: Status.INTERNAL, + details: 'No message received', + metadata: status.metadata, + }, + callerStack + ) + ); + } else { + callProperties.callback!(null, responseMessage); + } } else { - callProperties.callback!(callErrorFromStatus(status)); + const callerStack = getErrorStackString(callerStackError!); + callProperties.callback!(callErrorFromStatus(status, callerStack)); } + /* Avoid retaining the callerStackError object in the call context of + * the status event handler. */ + callerStackError = null; emitter.emit('status', status); }, }); @@ -508,16 +549,14 @@ export class Client { options?: CallOptions ): ClientReadableStream { const checkedArguments = this.checkMetadataAndOptions(metadata, options); - const methodDefinition: ClientMethodDefinition< - RequestType, - ResponseType - > = { - path: method, - requestStream: false, - responseStream: true, - requestSerialize: serialize, - responseDeserialize: deserialize, - }; + const methodDefinition: ClientMethodDefinition = + { + path: method, + requestStream: false, + responseStream: true, + requestSerialize: serialize, + responseDeserialize: deserialize, + }; let callProperties: CallProperties = { argument: argument, metadata: checkedArguments.metadata, @@ -531,9 +570,8 @@ export class Client { callProperties ) as CallProperties; } - const stream: ClientReadableStream = callProperties.call as ClientReadableStream< - ResponseType - >; + const stream: ClientReadableStream = + callProperties.call as ClientReadableStream; const interceptorArgs: InterceptorArguments = { clientInterceptors: this[INTERCEPTOR_SYMBOL], clientInterceptorProviders: this[INTERCEPTOR_PROVIDER_SYMBOL], @@ -552,10 +590,8 @@ export class Client { * before calling the CallInvocationTransformer, and we need to create the * call after that. */ stream.call = call; - if (callProperties.callOptions.credentials) { - call.setCredentials(callProperties.callOptions.credentials); - } let receivedStatus = false; + let callerStackError: Error | null = new Error(); call.start(callProperties.metadata, { onReceiveMetadata(metadata: Metadata) { stream.emit('metadata', metadata); @@ -571,8 +607,12 @@ export class Client { receivedStatus = true; stream.push(null); if (status.code !== Status.OK) { - stream.emit('error', callErrorFromStatus(status)); + const callerStack = getErrorStackString(callerStackError!); + stream.emit('error', callErrorFromStatus(status, callerStack)); } + /* Avoid retaining the callerStackError object in the call context of + * the status event handler. 
*/ + callerStackError = null; stream.emit('status', status); }, }); @@ -602,16 +642,14 @@ export class Client { options?: CallOptions ): ClientDuplexStream { const checkedArguments = this.checkMetadataAndOptions(metadata, options); - const methodDefinition: ClientMethodDefinition< - RequestType, - ResponseType - > = { - path: method, - requestStream: true, - responseStream: true, - requestSerialize: serialize, - responseDeserialize: deserialize, - }; + const methodDefinition: ClientMethodDefinition = + { + path: method, + requestStream: true, + responseStream: true, + requestSerialize: serialize, + responseDeserialize: deserialize, + }; let callProperties: CallProperties = { metadata: checkedArguments.metadata, call: new ClientDuplexStreamImpl( @@ -627,10 +665,8 @@ export class Client { callProperties ) as CallProperties; } - const stream: ClientDuplexStream< - RequestType, - ResponseType - > = callProperties.call as ClientDuplexStream; + const stream: ClientDuplexStream = + callProperties.call as ClientDuplexStream; const interceptorArgs: InterceptorArguments = { clientInterceptors: this[INTERCEPTOR_SYMBOL], clientInterceptorProviders: this[INTERCEPTOR_PROVIDER_SYMBOL], @@ -649,16 +685,14 @@ export class Client { * before calling the CallInvocationTransformer, and we need to create the * call after that. */ stream.call = call; - if (callProperties.callOptions.credentials) { - call.setCredentials(callProperties.callOptions.credentials); - } let receivedStatus = false; + let callerStackError: Error | null = new Error(); call.start(callProperties.metadata, { onReceiveMetadata(metadata: Metadata) { stream.emit('metadata', metadata); }, onReceiveMessage(message: Buffer) { - stream.push(message) + stream.push(message); }, onReceiveStatus(status: StatusObject) { if (receivedStatus) { @@ -667,12 +701,15 @@ export class Client { receivedStatus = true; stream.push(null); if (status.code !== Status.OK) { - stream.emit('error', callErrorFromStatus(status)); + const callerStack = getErrorStackString(callerStackError!); + stream.emit('error', callErrorFromStatus(status, callerStack)); } + /* Avoid retaining the callerStackError object in the call context of + * the status event handler. */ + callerStackError = null; stream.emit('status', status); }, }); return stream; } } - diff --git a/packages/grpc-js-xds/src/xds-stream-state/xds-stream-state.ts b/packages/grpc-js/src/compression-algorithms.ts similarity index 50% rename from packages/grpc-js-xds/src/xds-stream-state/xds-stream-state.ts rename to packages/grpc-js/src/compression-algorithms.ts index 83db1781e..67fdcf14c 100644 --- a/packages/grpc-js-xds/src/xds-stream-state/xds-stream-state.ts +++ b/packages/grpc-js/src/compression-algorithms.ts @@ -15,24 +15,8 @@ * */ -import { StatusObject } from "@grpc/grpc-js"; - -export interface Watcher { - onValidUpdate(update: UpdateType): void; - onTransientError(error: StatusObject): void; - onResourceDoesNotExist(): void; +export enum CompressionAlgorithms { + identity = 0, + deflate = 1, + gzip = 2, } - -export interface XdsStreamState { - versionInfo: string; - nonce: string; - getResourceNames(): string[]; - /** - * Returns a string containing the error details if the message should be nacked, - * or null if it should be acked. 
- * @param responses - */ - handleResponses(responses: ResponseType[]): string | null; - - reportStreamError(status: StatusObject): void; -} \ No newline at end of file diff --git a/packages/grpc-js/src/compression-filter.ts b/packages/grpc-js/src/compression-filter.ts index 330eb675a..f1600b36d 100644 --- a/packages/grpc-js/src/compression-filter.ts +++ b/packages/grpc-js/src/compression-filter.ts @@ -17,11 +17,29 @@ import * as zlib from 'zlib'; -import { Call, WriteFlags, WriteObject } from './call-stream'; +import { WriteObject, WriteFlags } from './call-interface'; import { Channel } from './channel'; +import { ChannelOptions } from './channel-options'; +import { CompressionAlgorithms } from './compression-algorithms'; +import { DEFAULT_MAX_RECEIVE_MESSAGE_LENGTH, LogVerbosity, Status } from './constants'; import { BaseFilter, Filter, FilterFactory } from './filter'; +import * as logging from './logging'; import { Metadata, MetadataValue } from './metadata'; +const isCompressionAlgorithmKey = ( + key: number +): key is CompressionAlgorithms => { + return ( + typeof key === 'number' && typeof CompressionAlgorithms[key] === 'string' + ); +}; + +type CompressionAlgorithm = keyof typeof CompressionAlgorithms; + +type SharedCompressionFilterConfig = { + serverSupportedEncodingHeader?: string; +}; + abstract class CompressionHandler { protected abstract compressMessage(message: Buffer): Promise; protected abstract decompressMessage(data: Buffer): Promise; @@ -80,6 +98,10 @@ class IdentityHandler extends CompressionHandler { } class DeflateHandler extends CompressionHandler { + constructor(private maxRecvMessageLength: number) { + super(); + } + compressMessage(message: Buffer) { return new Promise((resolve, reject) => { zlib.deflate(message, (err, output) => { @@ -94,18 +116,34 @@ class DeflateHandler extends CompressionHandler { decompressMessage(message: Buffer) { return new Promise((resolve, reject) => { - zlib.inflate(message, (err, output) => { - if (err) { - reject(err); - } else { - resolve(output); + let totalLength = 0; + const messageParts: Buffer[] = []; + const decompresser = zlib.createInflate(); + decompresser.on('data', (chunk: Buffer) => { + messageParts.push(chunk); + totalLength += chunk.byteLength; + if (this.maxRecvMessageLength !== -1 && totalLength > this.maxRecvMessageLength) { + decompresser.destroy(); + reject({ + code: Status.RESOURCE_EXHAUSTED, + details: `Received message that decompresses to a size larger than ${this.maxRecvMessageLength}` + }); } }); + decompresser.on('end', () => { + resolve(Buffer.concat(messageParts)); + }); + decompresser.write(message); + decompresser.end(); }); } } class GzipHandler extends CompressionHandler { + constructor(private maxRecvMessageLength: number) { + super(); + } + compressMessage(message: Buffer) { return new Promise((resolve, reject) => { zlib.gzip(message, (err, output) => { @@ -120,13 +158,25 @@ class GzipHandler extends CompressionHandler { decompressMessage(message: Buffer) { return new Promise((resolve, reject) => { - zlib.unzip(message, (err, output) => { - if (err) { - reject(err); - } else { - resolve(output); + let totalLength = 0; + const messageParts: Buffer[] = []; + const decompresser = zlib.createGunzip(); + decompresser.on('data', (chunk: Buffer) => { + messageParts.push(chunk); + totalLength += chunk.byteLength; + if (this.maxRecvMessageLength !== -1 && totalLength > this.maxRecvMessageLength) { + decompresser.destroy(); + reject({ + code: Status.RESOURCE_EXHAUSTED, + details: `Received message that 
decompresses to a size larger than ${this.maxRecvMessageLength}` + }); } }); + decompresser.on('end', () => { + resolve(Buffer.concat(messageParts)); + }); + decompresser.write(message); + decompresser.end(); }); } } @@ -151,14 +201,14 @@ class UnknownHandler extends CompressionHandler { } } -function getCompressionHandler(compressionName: string): CompressionHandler { +function getCompressionHandler(compressionName: string, maxReceiveMessageSize: number): CompressionHandler { switch (compressionName) { case 'identity': return new IdentityHandler(); case 'deflate': - return new DeflateHandler(); + return new DeflateHandler(maxReceiveMessageSize); case 'gzip': - return new GzipHandler(); + return new GzipHandler(maxReceiveMessageSize); default: return new UnknownHandler(compressionName); } @@ -167,10 +217,63 @@ function getCompressionHandler(compressionName: string): CompressionHandler { export class CompressionFilter extends BaseFilter implements Filter { private sendCompression: CompressionHandler = new IdentityHandler(); private receiveCompression: CompressionHandler = new IdentityHandler(); + private currentCompressionAlgorithm: CompressionAlgorithm = 'identity'; + private maxReceiveMessageLength: number; + + constructor( + channelOptions: ChannelOptions, + private sharedFilterConfig: SharedCompressionFilterConfig + ) { + super(); + + const compressionAlgorithmKey = + channelOptions['grpc.default_compression_algorithm']; + this.maxReceiveMessageLength = channelOptions['grpc.max_receive_message_length'] ?? DEFAULT_MAX_RECEIVE_MESSAGE_LENGTH + if (compressionAlgorithmKey !== undefined) { + if (isCompressionAlgorithmKey(compressionAlgorithmKey)) { + const clientSelectedEncoding = CompressionAlgorithms[ + compressionAlgorithmKey + ] as CompressionAlgorithm; + const serverSupportedEncodings = + sharedFilterConfig.serverSupportedEncodingHeader?.split(','); + /** + * There are two possible situations here: + * 1) We don't have any info yet from the server about what compression it supports + * In that case we should just use what the client tells us to use + * 2) We've previously received a response from the server including a grpc-accept-encoding header + * In that case we only want to use the encoding chosen by the client if the server supports it + */ + if ( + !serverSupportedEncodings || + serverSupportedEncodings.includes(clientSelectedEncoding) + ) { + this.currentCompressionAlgorithm = clientSelectedEncoding; + this.sendCompression = getCompressionHandler( + this.currentCompressionAlgorithm, + -1 + ); + } + } else { + logging.log( + LogVerbosity.ERROR, + `Invalid value provided for grpc.default_compression_algorithm option: ${compressionAlgorithmKey}` + ); + } + } + } + async sendMetadata(metadata: Promise): Promise { const headers: Metadata = await metadata; headers.set('grpc-accept-encoding', 'identity,deflate,gzip'); headers.set('accept-encoding', 'identity'); + + // No need to send the header if it's "identity" - behavior is identical; save the bandwidth + if (this.currentCompressionAlgorithm === 'identity') { + headers.remove('grpc-encoding'); + } else { + headers.set('grpc-encoding', this.currentCompressionAlgorithm); + } + return headers; } @@ -179,10 +282,29 @@ export class CompressionFilter extends BaseFilter implements Filter { if (receiveEncoding.length > 0) { const encoding: MetadataValue = receiveEncoding[0]; if (typeof encoding === 'string') { - this.receiveCompression = getCompressionHandler(encoding); + this.receiveCompression = getCompressionHandler(encoding, 
this.maxReceiveMessageLength); } } metadata.remove('grpc-encoding'); + + /* Check to see if the compression we're using to send messages is supported by the server + * If not, reset the sendCompression filter and have it use the default IdentityHandler */ + const serverSupportedEncodingsHeader = metadata.get( + 'grpc-accept-encoding' + )[0] as string | undefined; + if (serverSupportedEncodingsHeader) { + this.sharedFilterConfig.serverSupportedEncodingHeader = + serverSupportedEncodingsHeader; + const serverSupportedEncodings = + serverSupportedEncodingsHeader.split(','); + + if ( + !serverSupportedEncodings.includes(this.currentCompressionAlgorithm) + ) { + this.sendCompression = new IdentityHandler(); + this.currentCompressionAlgorithm = 'identity'; + } + } metadata.remove('grpc-accept-encoding'); return metadata; } @@ -192,10 +314,13 @@ export class CompressionFilter extends BaseFilter implements Filter { * and the output is a framed and possibly compressed message. For this * reason, this filter should be at the bottom of the filter stack */ const resolvedMessage: WriteObject = await message; - const compress = - resolvedMessage.flags === undefined - ? false - : (resolvedMessage.flags & WriteFlags.NoCompress) === 0; + let compress: boolean; + if (this.sendCompression instanceof IdentityHandler) { + compress = false; + } else { + compress = ((resolvedMessage.flags ?? 0) & WriteFlags.NoCompress) === 0; + } + return { message: await this.sendCompression.writeMessage( resolvedMessage.message, @@ -215,9 +340,11 @@ export class CompressionFilter extends BaseFilter implements Filter { } export class CompressionFilterFactory - implements FilterFactory { - constructor(private readonly channel: Channel) {} - createFilter(callStream: Call): CompressionFilter { - return new CompressionFilter(); + implements FilterFactory +{ + private sharedFilterConfig: SharedCompressionFilterConfig = {}; + constructor(channel: Channel, private readonly options: ChannelOptions) {} + createFilter(): CompressionFilter { + return new CompressionFilter(this.options, this.sharedFilterConfig); } } diff --git a/packages/grpc-js/src/connectivity-state.ts b/packages/grpc-js/src/connectivity-state.ts new file mode 100644 index 000000000..560ab9c39 --- /dev/null +++ b/packages/grpc-js/src/connectivity-state.ts @@ -0,0 +1,24 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +export enum ConnectivityState { + IDLE, + CONNECTING, + READY, + TRANSIENT_FAILURE, + SHUTDOWN, +} diff --git a/packages/grpc-js/src/constants.ts b/packages/grpc-js/src/constants.ts index 94763cfe1..865b24c94 100644 --- a/packages/grpc-js/src/constants.ts +++ b/packages/grpc-js/src/constants.ts @@ -52,7 +52,11 @@ export enum Propagate { CENSUS_TRACING_CONTEXT = 4, CANCELLATION = 8, // https://github.com/grpc/grpc/blob/master/include/grpc/impl/codegen/propagation_bits.h#L43 - DEFAULTS = 0xffff | Propagate.DEADLINE | Propagate.CENSUS_STATS_CONTEXT | Propagate.CENSUS_TRACING_CONTEXT | Propagate.CANCELLATION, + DEFAULTS = 0xffff | + Propagate.DEADLINE | + Propagate.CENSUS_STATS_CONTEXT | + Propagate.CENSUS_TRACING_CONTEXT | + Propagate.CANCELLATION, } // -1 means unlimited diff --git a/packages/grpc-js/src/control-plane-status.ts b/packages/grpc-js/src/control-plane-status.ts new file mode 100644 index 000000000..1d10cb3d9 --- /dev/null +++ b/packages/grpc-js/src/control-plane-status.ts @@ -0,0 +1,43 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import { Status } from './constants'; + +const INAPPROPRIATE_CONTROL_PLANE_CODES: Status[] = [ + Status.OK, + Status.INVALID_ARGUMENT, + Status.NOT_FOUND, + Status.ALREADY_EXISTS, + Status.FAILED_PRECONDITION, + Status.ABORTED, + Status.OUT_OF_RANGE, + Status.DATA_LOSS, +]; + +export function restrictControlPlaneStatusCode( + code: Status, + details: string +): { code: Status; details: string } { + if (INAPPROPRIATE_CONTROL_PLANE_CODES.includes(code)) { + return { + code: Status.INTERNAL, + details: `Invalid status from control plane: ${code} ${Status[code]} ${details}`, + }; + } else { + return { code, details }; + } +} diff --git a/packages/grpc-js/src/deadline-filter.ts b/packages/grpc-js/src/deadline-filter.ts deleted file mode 100644 index 99bfa2bec..000000000 --- a/packages/grpc-js/src/deadline-filter.ts +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -import { Call, StatusObject } from './call-stream'; -import { Channel } from './channel'; -import { Status } from './constants'; -import { BaseFilter, Filter, FilterFactory } from './filter'; -import { Metadata } from './metadata'; - -const units: Array<[string, number]> = [ - ['m', 1], - ['S', 1000], - ['M', 60 * 1000], - ['H', 60 * 60 * 1000], -]; - -function getDeadline(deadline: number) { - const now = new Date().getTime(); - const timeoutMs = Math.max(deadline - now, 0); - for (const [unit, factor] of units) { - const amount = timeoutMs / factor; - if (amount < 1e8) { - return String(Math.ceil(amount)) + unit; - } - } - throw new Error('Deadline is too far in the future'); -} - -export class DeadlineFilter extends BaseFilter implements Filter { - private timer: NodeJS.Timer | null = null; - private deadline: number; - constructor( - private readonly channel: Channel, - private readonly callStream: Call - ) { - super(); - const callDeadline = callStream.getDeadline(); - if (callDeadline instanceof Date) { - this.deadline = callDeadline.getTime(); - } else { - this.deadline = callDeadline; - } - const now: number = new Date().getTime(); - let timeout = this.deadline - now; - if (timeout <= 0) { - process.nextTick(() => { - callStream.cancelWithStatus( - Status.DEADLINE_EXCEEDED, - 'Deadline exceeded' - ); - }); - } else if (this.deadline !== Infinity) { - this.timer = setTimeout(() => { - callStream.cancelWithStatus( - Status.DEADLINE_EXCEEDED, - 'Deadline exceeded' - ); - }, timeout); - this.timer.unref?.(); - } - } - - async sendMetadata(metadata: Promise) { - if (this.deadline === Infinity) { - return metadata; - } - /* The input metadata promise depends on the original channel.connect() - * promise, so when it is complete that implies that the channel is - * connected */ - const finalMetadata = await metadata; - const timeoutString = getDeadline(this.deadline); - finalMetadata.set('grpc-timeout', timeoutString); - return finalMetadata; - } - - receiveTrailers(status: StatusObject) { - if (this.timer) { - clearTimeout(this.timer); - } - return status; - } -} - -export class DeadlineFilterFactory implements FilterFactory { - constructor(private readonly channel: Channel) {} - - createFilter(callStream: Call): DeadlineFilter { - return new DeadlineFilter(this.channel, callStream); - } -} diff --git a/packages/grpc-js/src/deadline.ts b/packages/grpc-js/src/deadline.ts new file mode 100644 index 000000000..de05e381e --- /dev/null +++ b/packages/grpc-js/src/deadline.ts @@ -0,0 +1,106 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +export type Deadline = Date | number; + +export function minDeadline(...deadlineList: Deadline[]): Deadline { + let minValue = Infinity; + for (const deadline of deadlineList) { + const deadlineMsecs = + deadline instanceof Date ? 
deadline.getTime() : deadline; + if (deadlineMsecs < minValue) { + minValue = deadlineMsecs; + } + } + return minValue; +} + +const units: Array<[string, number]> = [ + ['m', 1], + ['S', 1000], + ['M', 60 * 1000], + ['H', 60 * 60 * 1000], +]; + +export function getDeadlineTimeoutString(deadline: Deadline) { + const now = new Date().getTime(); + if (deadline instanceof Date) { + deadline = deadline.getTime(); + } + const timeoutMs = Math.max(deadline - now, 0); + for (const [unit, factor] of units) { + const amount = timeoutMs / factor; + if (amount < 1e8) { + return String(Math.ceil(amount)) + unit; + } + } + throw new Error('Deadline is too far in the future'); +} + +/** + * See https://nodejs.org/api/timers.html#settimeoutcallback-delay-args + * In particular, "When delay is larger than 2147483647 or less than 1, the + * delay will be set to 1. Non-integer delays are truncated to an integer." + * This number of milliseconds is almost 25 days. + */ +const MAX_TIMEOUT_TIME = 2147483647; + +/** + * Get the timeout value that should be passed to setTimeout now for the timer + * to end at the deadline. For any deadline before now, the timer should end + * immediately, represented by a value of 0. For any deadline more than + * MAX_TIMEOUT_TIME milliseconds in the future, a timer cannot be set that will + * end at that time, so it is treated as infinitely far in the future. + * @param deadline + * @returns + */ +export function getRelativeTimeout(deadline: Deadline) { + const deadlineMs = deadline instanceof Date ? deadline.getTime() : deadline; + const now = new Date().getTime(); + const timeout = deadlineMs - now; + if (timeout < 0) { + return 0; + } else if (timeout > MAX_TIMEOUT_TIME) { + return Infinity; + } else { + return timeout; + } +} + +export function deadlineToString(deadline: Deadline): string { + if (deadline instanceof Date) { + return deadline.toISOString(); + } else { + const dateDeadline = new Date(deadline); + if (Number.isNaN(dateDeadline.getTime())) { + return '' + deadline; + } else { + return dateDeadline.toISOString(); + } + } +} + +/** + * Calculate the difference between two dates as a number of seconds and format + * it as a string. + * @param startDate + * @param endDate + * @returns + */ +export function formatDateDifference(startDate: Date, endDate: Date): string { + return ((endDate.getTime() - startDate.getTime()) / 1000).toFixed(3) + 's'; +} diff --git a/packages/grpc-js/src/duration.ts b/packages/grpc-js/src/duration.ts new file mode 100644 index 000000000..ff77dba25 --- /dev/null +++ b/packages/grpc-js/src/duration.ts @@ -0,0 +1,36 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +export interface Duration { + seconds: number; + nanos: number; +} + +export function msToDuration(millis: number): Duration { + return { + seconds: (millis / 1000) | 0, + nanos: ((millis % 1000) * 1_000_000) | 0, + }; +} + +export function durationToMs(duration: Duration): number { + return (duration.seconds * 1000 + duration.nanos / 1_000_000) | 0; +} + +export function isDuration(value: any): value is Duration { + return typeof value.seconds === 'number' && typeof value.nanos === 'number'; +} diff --git a/packages/grpc-js/src/error.ts b/packages/grpc-js/src/error.ts new file mode 100644 index 000000000..105a3eef3 --- /dev/null +++ b/packages/grpc-js/src/error.ts @@ -0,0 +1,37 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +export function getErrorMessage(error: unknown): string { + if (error instanceof Error) { + return error.message; + } else { + return String(error); + } +} + +export function getErrorCode(error: unknown): number | null { + if ( + typeof error === 'object' && + error !== null && + 'code' in error && + typeof (error as Record).code === 'number' + ) { + return (error as Record).code; + } else { + return null; + } +} diff --git a/packages/grpc-js/src/experimental.ts b/packages/grpc-js/src/experimental.ts index f62838c12..1e7a1e143 100644 --- a/packages/grpc-js/src/experimental.ts +++ b/packages/grpc-js/src/experimental.ts @@ -1,12 +1,54 @@ -export { trace } from './logging'; -export { Resolver, ResolverListener, registerResolver, ConfigSelector } from './resolver'; -export { GrpcUri, uriToString } from './uri-parser'; -export { ServiceConfig } from './service-config'; -export { BackoffTimeout } from './backoff-timeout'; -export { LoadBalancer, LoadBalancingConfig, ChannelControlHelper, registerLoadBalancerType, getFirstUsableConfig, validateLoadBalancingConfig } from './load-balancer'; -export { SubchannelAddress, subchannelAddressToString } from './subchannel'; -export { ChildLoadBalancerHandler } from './load-balancer-child-handler'; -export { Picker, UnavailablePicker, QueuePicker, PickResult, PickArgs, PickResultType } from './picker'; -export { Call as CallStream } from './call-stream'; -export { Filter, BaseFilter, FilterFactory } from './filter'; -export { FilterStackFactory } from './filter-stack'; \ No newline at end of file +export { trace, log } from './logging'; +export { + Resolver, + ResolverListener, + registerResolver, + ConfigSelector, + createResolver, +} from './resolver'; +export { GrpcUri, uriToString } from './uri-parser'; +export { Duration, durationToMs } from './duration'; +export { BackoffTimeout } from './backoff-timeout'; +export { + LoadBalancer, + TypedLoadBalancingConfig, + ChannelControlHelper, + createChildChannelControlHelper, + registerLoadBalancerType, + selectLbConfigFromList, + parseLoadBalancingConfig, + isLoadBalancerNameRegistered, +} from './load-balancer'; +export { LeafLoadBalancer } from './load-balancer-pick-first'; +export { + SubchannelAddress, + 
subchannelAddressToString, + Endpoint, + endpointToString, + endpointHasAddress, + EndpointMap, +} from './subchannel-address'; +export { ChildLoadBalancerHandler } from './load-balancer-child-handler'; +export { + Picker, + UnavailablePicker, + QueuePicker, + PickResult, + PickArgs, + PickResultType, +} from './picker'; +export { Call as CallStream } from './call-interface'; +export { Filter, BaseFilter, FilterFactory } from './filter'; +export { FilterStackFactory } from './filter-stack'; +export { registerAdminService } from './admin'; +export { + SubchannelInterface, + BaseSubchannelWrapper, + ConnectivityStateListener, + HealthListener, +} from './subchannel-interface'; +export { + OutlierDetectionRawConfig, + SuccessRateEjectionConfig, + FailurePercentageEjectionConfig, +} from './load-balancer-outlier-detection'; diff --git a/packages/grpc-js/src/filter-stack.ts b/packages/grpc-js/src/filter-stack.ts index a656a4099..910f5aa36 100644 --- a/packages/grpc-js/src/filter-stack.ts +++ b/packages/grpc-js/src/filter-stack.ts @@ -15,14 +15,14 @@ * */ -import { Call, StatusObject, WriteObject } from './call-stream'; +import { StatusObject, WriteObject } from './call-interface'; import { Filter, FilterFactory } from './filter'; import { Metadata } from './metadata'; export class FilterStack implements Filter { constructor(private readonly filters: Filter[]) {} - sendMetadata(metadata: Promise) { + sendMetadata(metadata: Promise): Promise { let result: Promise = metadata; for (let i = 0; i < this.filters.length; i++) { @@ -71,14 +71,30 @@ export class FilterStack implements Filter { return result; } + + push(filters: Filter[]) { + this.filters.unshift(...filters); + } + + getFilters(): Filter[] { + return this.filters; + } } export class FilterStackFactory implements FilterFactory { constructor(private readonly factories: Array>) {} - createFilter(callStream: Call): FilterStack { + push(filterFactories: FilterFactory[]) { + this.factories.unshift(...filterFactories); + } + + clone(): FilterStackFactory { + return new FilterStackFactory([...this.factories]); + } + + createFilter(): FilterStack { return new FilterStack( - this.factories.map((factory) => factory.createFilter(callStream)) + this.factories.map(factory => factory.createFilter()) ); } } diff --git a/packages/grpc-js/src/filter.ts b/packages/grpc-js/src/filter.ts index c1e412ae5..5313f91a8 100644 --- a/packages/grpc-js/src/filter.ts +++ b/packages/grpc-js/src/filter.ts @@ -15,12 +15,14 @@ * */ -import { Call, StatusObject, WriteObject } from './call-stream'; +import { StatusObject, WriteObject } from './call-interface'; import { Metadata } from './metadata'; /** * Filter classes represent related per-call logic and state that is primarily - * used to modify incoming and outgoing data + * used to modify incoming and outgoing data. All async filters can be + * rejected. The rejection error must be a StatusObject, and a rejection will + * cause the call to end with that status. 
*/ export interface Filter { sendMetadata(metadata: Promise): Promise; @@ -57,5 +59,5 @@ export abstract class BaseFilter implements Filter { } export interface FilterFactory { - createFilter(callStream: Call): T; + createFilter(): T; } diff --git a/packages/grpc-js/src/generated/channelz.ts b/packages/grpc-js/src/generated/channelz.ts new file mode 100644 index 000000000..367cf27f6 --- /dev/null +++ b/packages/grpc-js/src/generated/channelz.ts @@ -0,0 +1,73 @@ +import type * as grpc from '../index'; +import type { MessageTypeDefinition } from '@grpc/proto-loader'; + +import type { ChannelzClient as _grpc_channelz_v1_ChannelzClient, ChannelzDefinition as _grpc_channelz_v1_ChannelzDefinition } from './grpc/channelz/v1/Channelz'; + +type SubtypeConstructor any, Subtype> = { + new(...args: ConstructorParameters): Subtype; +}; + +export interface ProtoGrpcType { + google: { + protobuf: { + Any: MessageTypeDefinition + BoolValue: MessageTypeDefinition + BytesValue: MessageTypeDefinition + DoubleValue: MessageTypeDefinition + Duration: MessageTypeDefinition + FloatValue: MessageTypeDefinition + Int32Value: MessageTypeDefinition + Int64Value: MessageTypeDefinition + StringValue: MessageTypeDefinition + Timestamp: MessageTypeDefinition + UInt32Value: MessageTypeDefinition + UInt64Value: MessageTypeDefinition + } + } + grpc: { + channelz: { + v1: { + Address: MessageTypeDefinition + Channel: MessageTypeDefinition + ChannelConnectivityState: MessageTypeDefinition + ChannelData: MessageTypeDefinition + ChannelRef: MessageTypeDefinition + ChannelTrace: MessageTypeDefinition + ChannelTraceEvent: MessageTypeDefinition + /** + * Channelz is a service exposed by gRPC servers that provides detailed debug + * information. + */ + Channelz: SubtypeConstructor & { service: _grpc_channelz_v1_ChannelzDefinition } + GetChannelRequest: MessageTypeDefinition + GetChannelResponse: MessageTypeDefinition + GetServerRequest: MessageTypeDefinition + GetServerResponse: MessageTypeDefinition + GetServerSocketsRequest: MessageTypeDefinition + GetServerSocketsResponse: MessageTypeDefinition + GetServersRequest: MessageTypeDefinition + GetServersResponse: MessageTypeDefinition + GetSocketRequest: MessageTypeDefinition + GetSocketResponse: MessageTypeDefinition + GetSubchannelRequest: MessageTypeDefinition + GetSubchannelResponse: MessageTypeDefinition + GetTopChannelsRequest: MessageTypeDefinition + GetTopChannelsResponse: MessageTypeDefinition + Security: MessageTypeDefinition + Server: MessageTypeDefinition + ServerData: MessageTypeDefinition + ServerRef: MessageTypeDefinition + Socket: MessageTypeDefinition + SocketData: MessageTypeDefinition + SocketOption: MessageTypeDefinition + SocketOptionLinger: MessageTypeDefinition + SocketOptionTcpInfo: MessageTypeDefinition + SocketOptionTimeout: MessageTypeDefinition + SocketRef: MessageTypeDefinition + Subchannel: MessageTypeDefinition + SubchannelRef: MessageTypeDefinition + } + } + } +} + diff --git a/packages/grpc-js/src/generated/google/protobuf/Any.ts b/packages/grpc-js/src/generated/google/protobuf/Any.ts new file mode 100644 index 000000000..fcaa6724e --- /dev/null +++ b/packages/grpc-js/src/generated/google/protobuf/Any.ts @@ -0,0 +1,13 @@ +// Original file: null + +import type { AnyExtension } from '@grpc/proto-loader'; + +export type Any = AnyExtension | { + type_url: string; + value: Buffer | Uint8Array | string; +} + +export interface Any__Output { + 'type_url': (string); + 'value': (Buffer); +} diff --git 
a/packages/grpc-js/src/generated/google/protobuf/BoolValue.ts b/packages/grpc-js/src/generated/google/protobuf/BoolValue.ts new file mode 100644 index 000000000..86507eaf1 --- /dev/null +++ b/packages/grpc-js/src/generated/google/protobuf/BoolValue.ts @@ -0,0 +1,10 @@ +// Original file: null + + +export interface BoolValue { + 'value'?: (boolean); +} + +export interface BoolValue__Output { + 'value': (boolean); +} diff --git a/packages/grpc-js/src/generated/google/protobuf/BytesValue.ts b/packages/grpc-js/src/generated/google/protobuf/BytesValue.ts new file mode 100644 index 000000000..9cec76f71 --- /dev/null +++ b/packages/grpc-js/src/generated/google/protobuf/BytesValue.ts @@ -0,0 +1,10 @@ +// Original file: null + + +export interface BytesValue { + 'value'?: (Buffer | Uint8Array | string); +} + +export interface BytesValue__Output { + 'value': (Buffer); +} diff --git a/packages/grpc-js/src/generated/google/protobuf/DoubleValue.ts b/packages/grpc-js/src/generated/google/protobuf/DoubleValue.ts new file mode 100644 index 000000000..d70b303c2 --- /dev/null +++ b/packages/grpc-js/src/generated/google/protobuf/DoubleValue.ts @@ -0,0 +1,10 @@ +// Original file: null + + +export interface DoubleValue { + 'value'?: (number | string); +} + +export interface DoubleValue__Output { + 'value': (number); +} diff --git a/packages/grpc-js/src/generated/google/protobuf/Duration.ts b/packages/grpc-js/src/generated/google/protobuf/Duration.ts new file mode 100644 index 000000000..8595377a0 --- /dev/null +++ b/packages/grpc-js/src/generated/google/protobuf/Duration.ts @@ -0,0 +1,13 @@ +// Original file: null + +import type { Long } from '@grpc/proto-loader'; + +export interface Duration { + 'seconds'?: (number | string | Long); + 'nanos'?: (number); +} + +export interface Duration__Output { + 'seconds': (string); + 'nanos': (number); +} diff --git a/packages/grpc-js/src/generated/google/protobuf/FloatValue.ts b/packages/grpc-js/src/generated/google/protobuf/FloatValue.ts new file mode 100644 index 000000000..54a655fbb --- /dev/null +++ b/packages/grpc-js/src/generated/google/protobuf/FloatValue.ts @@ -0,0 +1,10 @@ +// Original file: null + + +export interface FloatValue { + 'value'?: (number | string); +} + +export interface FloatValue__Output { + 'value': (number); +} diff --git a/packages/grpc-js/src/generated/google/protobuf/Int32Value.ts b/packages/grpc-js/src/generated/google/protobuf/Int32Value.ts new file mode 100644 index 000000000..ec4eeb7ec --- /dev/null +++ b/packages/grpc-js/src/generated/google/protobuf/Int32Value.ts @@ -0,0 +1,10 @@ +// Original file: null + + +export interface Int32Value { + 'value'?: (number); +} + +export interface Int32Value__Output { + 'value': (number); +} diff --git a/packages/grpc-js/src/generated/google/protobuf/Int64Value.ts b/packages/grpc-js/src/generated/google/protobuf/Int64Value.ts new file mode 100644 index 000000000..f7375196d --- /dev/null +++ b/packages/grpc-js/src/generated/google/protobuf/Int64Value.ts @@ -0,0 +1,11 @@ +// Original file: null + +import type { Long } from '@grpc/proto-loader'; + +export interface Int64Value { + 'value'?: (number | string | Long); +} + +export interface Int64Value__Output { + 'value': (string); +} diff --git a/packages/grpc-js/src/generated/google/protobuf/StringValue.ts b/packages/grpc-js/src/generated/google/protobuf/StringValue.ts new file mode 100644 index 000000000..673090e3f --- /dev/null +++ b/packages/grpc-js/src/generated/google/protobuf/StringValue.ts @@ -0,0 +1,10 @@ +// Original file: null + + +export interface 
StringValue { + 'value'?: (string); +} + +export interface StringValue__Output { + 'value': (string); +} diff --git a/packages/grpc-js/src/generated/google/protobuf/Timestamp.ts b/packages/grpc-js/src/generated/google/protobuf/Timestamp.ts new file mode 100644 index 000000000..ceaa32b5f --- /dev/null +++ b/packages/grpc-js/src/generated/google/protobuf/Timestamp.ts @@ -0,0 +1,13 @@ +// Original file: null + +import type { Long } from '@grpc/proto-loader'; + +export interface Timestamp { + 'seconds'?: (number | string | Long); + 'nanos'?: (number); +} + +export interface Timestamp__Output { + 'seconds': (string); + 'nanos': (number); +} diff --git a/packages/grpc-js/src/generated/google/protobuf/UInt32Value.ts b/packages/grpc-js/src/generated/google/protobuf/UInt32Value.ts new file mode 100644 index 000000000..973ab34a5 --- /dev/null +++ b/packages/grpc-js/src/generated/google/protobuf/UInt32Value.ts @@ -0,0 +1,10 @@ +// Original file: null + + +export interface UInt32Value { + 'value'?: (number); +} + +export interface UInt32Value__Output { + 'value': (number); +} diff --git a/packages/grpc-js/src/generated/google/protobuf/UInt64Value.ts b/packages/grpc-js/src/generated/google/protobuf/UInt64Value.ts new file mode 100644 index 000000000..7a85c39ce --- /dev/null +++ b/packages/grpc-js/src/generated/google/protobuf/UInt64Value.ts @@ -0,0 +1,11 @@ +// Original file: null + +import type { Long } from '@grpc/proto-loader'; + +export interface UInt64Value { + 'value'?: (number | string | Long); +} + +export interface UInt64Value__Output { + 'value': (string); +} diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/Address.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/Address.ts new file mode 100644 index 000000000..259cfeabe --- /dev/null +++ b/packages/grpc-js/src/generated/grpc/channelz/v1/Address.ts @@ -0,0 +1,89 @@ +// Original file: proto/channelz.proto + +import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../google/protobuf/Any'; + +/** + * An address type not included above. + */ +export interface _grpc_channelz_v1_Address_OtherAddress { + /** + * The human readable version of the value. This value should be set. + */ + 'name'?: (string); + /** + * The actual address message. + */ + 'value'?: (_google_protobuf_Any | null); +} + +/** + * An address type not included above. + */ +export interface _grpc_channelz_v1_Address_OtherAddress__Output { + /** + * The human readable version of the value. This value should be set. + */ + 'name': (string); + /** + * The actual address message. + */ + 'value': (_google_protobuf_Any__Output | null); +} + +export interface _grpc_channelz_v1_Address_TcpIpAddress { + /** + * Either the IPv4 or IPv6 address in bytes. Will be either 4 bytes or 16 + * bytes in length. + */ + 'ip_address'?: (Buffer | Uint8Array | string); + /** + * 0-64k, or -1 if not appropriate. + */ + 'port'?: (number); +} + +export interface _grpc_channelz_v1_Address_TcpIpAddress__Output { + /** + * Either the IPv4 or IPv6 address in bytes. Will be either 4 bytes or 16 + * bytes in length. + */ + 'ip_address': (Buffer); + /** + * 0-64k, or -1 if not appropriate. + */ + 'port': (number); +} + +/** + * A Unix Domain Socket address. + */ +export interface _grpc_channelz_v1_Address_UdsAddress { + 'filename'?: (string); +} + +/** + * A Unix Domain Socket address. + */ +export interface _grpc_channelz_v1_Address_UdsAddress__Output { + 'filename': (string); +} + +/** + * Address represents the address used to create the socket. 
+ */ +export interface Address { + 'tcpip_address'?: (_grpc_channelz_v1_Address_TcpIpAddress | null); + 'uds_address'?: (_grpc_channelz_v1_Address_UdsAddress | null); + 'other_address'?: (_grpc_channelz_v1_Address_OtherAddress | null); + 'address'?: "tcpip_address"|"uds_address"|"other_address"; +} + +/** + * Address represents the address used to create the socket. + */ +export interface Address__Output { + 'tcpip_address'?: (_grpc_channelz_v1_Address_TcpIpAddress__Output | null); + 'uds_address'?: (_grpc_channelz_v1_Address_UdsAddress__Output | null); + 'other_address'?: (_grpc_channelz_v1_Address_OtherAddress__Output | null); + 'address': "tcpip_address"|"uds_address"|"other_address"; +} diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/Channel.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/Channel.ts new file mode 100644 index 000000000..93b4a261d --- /dev/null +++ b/packages/grpc-js/src/generated/grpc/channelz/v1/Channel.ts @@ -0,0 +1,68 @@ +// Original file: proto/channelz.proto + +import type { ChannelRef as _grpc_channelz_v1_ChannelRef, ChannelRef__Output as _grpc_channelz_v1_ChannelRef__Output } from '../../../grpc/channelz/v1/ChannelRef'; +import type { ChannelData as _grpc_channelz_v1_ChannelData, ChannelData__Output as _grpc_channelz_v1_ChannelData__Output } from '../../../grpc/channelz/v1/ChannelData'; +import type { SubchannelRef as _grpc_channelz_v1_SubchannelRef, SubchannelRef__Output as _grpc_channelz_v1_SubchannelRef__Output } from '../../../grpc/channelz/v1/SubchannelRef'; +import type { SocketRef as _grpc_channelz_v1_SocketRef, SocketRef__Output as _grpc_channelz_v1_SocketRef__Output } from '../../../grpc/channelz/v1/SocketRef'; + +/** + * Channel is a logical grouping of channels, subchannels, and sockets. + */ +export interface Channel { + /** + * The identifier for this channel. This should bet set. + */ + 'ref'?: (_grpc_channelz_v1_ChannelRef | null); + /** + * Data specific to this channel. + */ + 'data'?: (_grpc_channelz_v1_ChannelData | null); + /** + * There are no ordering guarantees on the order of channel refs. + * There may not be cycles in the ref graph. + * A channel ref may be present in more than one channel or subchannel. + */ + 'channel_ref'?: (_grpc_channelz_v1_ChannelRef)[]; + /** + * At most one of 'channel_ref+subchannel_ref' and 'socket' is set. + * There are no ordering guarantees on the order of subchannel refs. + * There may not be cycles in the ref graph. + * A sub channel ref may be present in more than one channel or subchannel. + */ + 'subchannel_ref'?: (_grpc_channelz_v1_SubchannelRef)[]; + /** + * There are no ordering guarantees on the order of sockets. + */ + 'socket_ref'?: (_grpc_channelz_v1_SocketRef)[]; +} + +/** + * Channel is a logical grouping of channels, subchannels, and sockets. + */ +export interface Channel__Output { + /** + * The identifier for this channel. This should bet set. + */ + 'ref': (_grpc_channelz_v1_ChannelRef__Output | null); + /** + * Data specific to this channel. + */ + 'data': (_grpc_channelz_v1_ChannelData__Output | null); + /** + * There are no ordering guarantees on the order of channel refs. + * There may not be cycles in the ref graph. + * A channel ref may be present in more than one channel or subchannel. + */ + 'channel_ref': (_grpc_channelz_v1_ChannelRef__Output)[]; + /** + * At most one of 'channel_ref+subchannel_ref' and 'socket' is set. + * There are no ordering guarantees on the order of subchannel refs. + * There may not be cycles in the ref graph. 
+ * A sub channel ref may be present in more than one channel or subchannel. + */ + 'subchannel_ref': (_grpc_channelz_v1_SubchannelRef__Output)[]; + /** + * There are no ordering guarantees on the order of sockets. + */ + 'socket_ref': (_grpc_channelz_v1_SocketRef__Output)[]; +} diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/ChannelConnectivityState.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/ChannelConnectivityState.ts new file mode 100644 index 000000000..78fb0693e --- /dev/null +++ b/packages/grpc-js/src/generated/grpc/channelz/v1/ChannelConnectivityState.ts @@ -0,0 +1,45 @@ +// Original file: proto/channelz.proto + + +// Original file: proto/channelz.proto + +export const _grpc_channelz_v1_ChannelConnectivityState_State = { + UNKNOWN: 'UNKNOWN', + IDLE: 'IDLE', + CONNECTING: 'CONNECTING', + READY: 'READY', + TRANSIENT_FAILURE: 'TRANSIENT_FAILURE', + SHUTDOWN: 'SHUTDOWN', +} as const; + +export type _grpc_channelz_v1_ChannelConnectivityState_State = + | 'UNKNOWN' + | 0 + | 'IDLE' + | 1 + | 'CONNECTING' + | 2 + | 'READY' + | 3 + | 'TRANSIENT_FAILURE' + | 4 + | 'SHUTDOWN' + | 5 + +export type _grpc_channelz_v1_ChannelConnectivityState_State__Output = typeof _grpc_channelz_v1_ChannelConnectivityState_State[keyof typeof _grpc_channelz_v1_ChannelConnectivityState_State] + +/** + * These come from the specified states in this document: + * https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md + */ +export interface ChannelConnectivityState { + 'state'?: (_grpc_channelz_v1_ChannelConnectivityState_State); +} + +/** + * These come from the specified states in this document: + * https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md + */ +export interface ChannelConnectivityState__Output { + 'state': (_grpc_channelz_v1_ChannelConnectivityState_State__Output); +} diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/ChannelData.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/ChannelData.ts new file mode 100644 index 000000000..6d6824af4 --- /dev/null +++ b/packages/grpc-js/src/generated/grpc/channelz/v1/ChannelData.ts @@ -0,0 +1,76 @@ +// Original file: proto/channelz.proto + +import type { ChannelConnectivityState as _grpc_channelz_v1_ChannelConnectivityState, ChannelConnectivityState__Output as _grpc_channelz_v1_ChannelConnectivityState__Output } from '../../../grpc/channelz/v1/ChannelConnectivityState'; +import type { ChannelTrace as _grpc_channelz_v1_ChannelTrace, ChannelTrace__Output as _grpc_channelz_v1_ChannelTrace__Output } from '../../../grpc/channelz/v1/ChannelTrace'; +import type { Timestamp as _google_protobuf_Timestamp, Timestamp__Output as _google_protobuf_Timestamp__Output } from '../../../google/protobuf/Timestamp'; +import type { Long } from '@grpc/proto-loader'; + +/** + * Channel data is data related to a specific Channel or Subchannel. + */ +export interface ChannelData { + /** + * The connectivity state of the channel or subchannel. Implementations + * should always set this. + */ + 'state'?: (_grpc_channelz_v1_ChannelConnectivityState | null); + /** + * The target this channel originally tried to connect to. May be absent + */ + 'target'?: (string); + /** + * A trace of recent events on the channel. May be absent. 
+ */ + 'trace'?: (_grpc_channelz_v1_ChannelTrace | null); + /** + * The number of calls started on the channel + */ + 'calls_started'?: (number | string | Long); + /** + * The number of calls that have completed with an OK status + */ + 'calls_succeeded'?: (number | string | Long); + /** + * The number of calls that have completed with a non-OK status + */ + 'calls_failed'?: (number | string | Long); + /** + * The last time a call was started on the channel. + */ + 'last_call_started_timestamp'?: (_google_protobuf_Timestamp | null); +} + +/** + * Channel data is data related to a specific Channel or Subchannel. + */ +export interface ChannelData__Output { + /** + * The connectivity state of the channel or subchannel. Implementations + * should always set this. + */ + 'state': (_grpc_channelz_v1_ChannelConnectivityState__Output | null); + /** + * The target this channel originally tried to connect to. May be absent + */ + 'target': (string); + /** + * A trace of recent events on the channel. May be absent. + */ + 'trace': (_grpc_channelz_v1_ChannelTrace__Output | null); + /** + * The number of calls started on the channel + */ + 'calls_started': (string); + /** + * The number of calls that have completed with an OK status + */ + 'calls_succeeded': (string); + /** + * The number of calls that have completed with a non-OK status + */ + 'calls_failed': (string); + /** + * The last time a call was started on the channel. + */ + 'last_call_started_timestamp': (_google_protobuf_Timestamp__Output | null); +} diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/ChannelRef.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/ChannelRef.ts new file mode 100644 index 000000000..231d00876 --- /dev/null +++ b/packages/grpc-js/src/generated/grpc/channelz/v1/ChannelRef.ts @@ -0,0 +1,31 @@ +// Original file: proto/channelz.proto + +import type { Long } from '@grpc/proto-loader'; + +/** + * ChannelRef is a reference to a Channel. + */ +export interface ChannelRef { + /** + * The globally unique id for this channel. Must be a positive number. + */ + 'channel_id'?: (number | string | Long); + /** + * An optional name associated with the channel. + */ + 'name'?: (string); +} + +/** + * ChannelRef is a reference to a Channel. + */ +export interface ChannelRef__Output { + /** + * The globally unique id for this channel. Must be a positive number. + */ + 'channel_id': (string); + /** + * An optional name associated with the channel. + */ + 'name': (string); +} diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/ChannelTrace.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/ChannelTrace.ts new file mode 100644 index 000000000..7dbc8d924 --- /dev/null +++ b/packages/grpc-js/src/generated/grpc/channelz/v1/ChannelTrace.ts @@ -0,0 +1,45 @@ +// Original file: proto/channelz.proto + +import type { Timestamp as _google_protobuf_Timestamp, Timestamp__Output as _google_protobuf_Timestamp__Output } from '../../../google/protobuf/Timestamp'; +import type { ChannelTraceEvent as _grpc_channelz_v1_ChannelTraceEvent, ChannelTraceEvent__Output as _grpc_channelz_v1_ChannelTraceEvent__Output } from '../../../grpc/channelz/v1/ChannelTraceEvent'; +import type { Long } from '@grpc/proto-loader'; + +/** + * ChannelTrace represents the recent events that have occurred on the channel. + */ +export interface ChannelTrace { + /** + * Number of events ever logged in this tracing object. This can differ from + * events.size() because events can be overwritten or garbage collected by + * implementations. 
+ */ + 'num_events_logged'?: (number | string | Long); + /** + * Time that this channel was created. + */ + 'creation_timestamp'?: (_google_protobuf_Timestamp | null); + /** + * List of events that have occurred on this channel. + */ + 'events'?: (_grpc_channelz_v1_ChannelTraceEvent)[]; +} + +/** + * ChannelTrace represents the recent events that have occurred on the channel. + */ +export interface ChannelTrace__Output { + /** + * Number of events ever logged in this tracing object. This can differ from + * events.size() because events can be overwritten or garbage collected by + * implementations. + */ + 'num_events_logged': (string); + /** + * Time that this channel was created. + */ + 'creation_timestamp': (_google_protobuf_Timestamp__Output | null); + /** + * List of events that have occurred on this channel. + */ + 'events': (_grpc_channelz_v1_ChannelTraceEvent__Output)[]; +} diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/ChannelTraceEvent.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/ChannelTraceEvent.ts new file mode 100644 index 000000000..403e4f123 --- /dev/null +++ b/packages/grpc-js/src/generated/grpc/channelz/v1/ChannelTraceEvent.ts @@ -0,0 +1,91 @@ +// Original file: proto/channelz.proto + +import type { Timestamp as _google_protobuf_Timestamp, Timestamp__Output as _google_protobuf_Timestamp__Output } from '../../../google/protobuf/Timestamp'; +import type { ChannelRef as _grpc_channelz_v1_ChannelRef, ChannelRef__Output as _grpc_channelz_v1_ChannelRef__Output } from '../../../grpc/channelz/v1/ChannelRef'; +import type { SubchannelRef as _grpc_channelz_v1_SubchannelRef, SubchannelRef__Output as _grpc_channelz_v1_SubchannelRef__Output } from '../../../grpc/channelz/v1/SubchannelRef'; + +// Original file: proto/channelz.proto + +/** + * The supported severity levels of trace events. + */ +export const _grpc_channelz_v1_ChannelTraceEvent_Severity = { + CT_UNKNOWN: 'CT_UNKNOWN', + CT_INFO: 'CT_INFO', + CT_WARNING: 'CT_WARNING', + CT_ERROR: 'CT_ERROR', +} as const; + +/** + * The supported severity levels of trace events. + */ +export type _grpc_channelz_v1_ChannelTraceEvent_Severity = + | 'CT_UNKNOWN' + | 0 + | 'CT_INFO' + | 1 + | 'CT_WARNING' + | 2 + | 'CT_ERROR' + | 3 + +/** + * The supported severity levels of trace events. + */ +export type _grpc_channelz_v1_ChannelTraceEvent_Severity__Output = typeof _grpc_channelz_v1_ChannelTraceEvent_Severity[keyof typeof _grpc_channelz_v1_ChannelTraceEvent_Severity] + +/** + * A trace event is an interesting thing that happened to a channel or + * subchannel, such as creation, address resolution, subchannel creation, etc. + */ +export interface ChannelTraceEvent { + /** + * High level description of the event. + */ + 'description'?: (string); + /** + * the severity of the trace event + */ + 'severity'?: (_grpc_channelz_v1_ChannelTraceEvent_Severity); + /** + * When this event occurred. + */ + 'timestamp'?: (_google_protobuf_Timestamp | null); + 'channel_ref'?: (_grpc_channelz_v1_ChannelRef | null); + 'subchannel_ref'?: (_grpc_channelz_v1_SubchannelRef | null); + /** + * ref of referenced channel or subchannel. + * Optional, only present if this event refers to a child object. For example, + * this field would be filled if this trace event was for a subchannel being + * created. + */ + 'child_ref'?: "channel_ref"|"subchannel_ref"; +} + +/** + * A trace event is an interesting thing that happened to a channel or + * subchannel, such as creation, address resolution, subchannel creation, etc. 
+ */ +export interface ChannelTraceEvent__Output { + /** + * High level description of the event. + */ + 'description': (string); + /** + * the severity of the trace event + */ + 'severity': (_grpc_channelz_v1_ChannelTraceEvent_Severity__Output); + /** + * When this event occurred. + */ + 'timestamp': (_google_protobuf_Timestamp__Output | null); + 'channel_ref'?: (_grpc_channelz_v1_ChannelRef__Output | null); + 'subchannel_ref'?: (_grpc_channelz_v1_SubchannelRef__Output | null); + /** + * ref of referenced channel or subchannel. + * Optional, only present if this event refers to a child object. For example, + * this field would be filled if this trace event was for a subchannel being + * created. + */ + 'child_ref': "channel_ref"|"subchannel_ref"; +} diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/Channelz.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/Channelz.ts new file mode 100644 index 000000000..4c8c18aa7 --- /dev/null +++ b/packages/grpc-js/src/generated/grpc/channelz/v1/Channelz.ts @@ -0,0 +1,178 @@ +// Original file: proto/channelz.proto + +import type * as grpc from '../../../../index' +import type { MethodDefinition } from '@grpc/proto-loader' +import type { GetChannelRequest as _grpc_channelz_v1_GetChannelRequest, GetChannelRequest__Output as _grpc_channelz_v1_GetChannelRequest__Output } from '../../../grpc/channelz/v1/GetChannelRequest'; +import type { GetChannelResponse as _grpc_channelz_v1_GetChannelResponse, GetChannelResponse__Output as _grpc_channelz_v1_GetChannelResponse__Output } from '../../../grpc/channelz/v1/GetChannelResponse'; +import type { GetServerRequest as _grpc_channelz_v1_GetServerRequest, GetServerRequest__Output as _grpc_channelz_v1_GetServerRequest__Output } from '../../../grpc/channelz/v1/GetServerRequest'; +import type { GetServerResponse as _grpc_channelz_v1_GetServerResponse, GetServerResponse__Output as _grpc_channelz_v1_GetServerResponse__Output } from '../../../grpc/channelz/v1/GetServerResponse'; +import type { GetServerSocketsRequest as _grpc_channelz_v1_GetServerSocketsRequest, GetServerSocketsRequest__Output as _grpc_channelz_v1_GetServerSocketsRequest__Output } from '../../../grpc/channelz/v1/GetServerSocketsRequest'; +import type { GetServerSocketsResponse as _grpc_channelz_v1_GetServerSocketsResponse, GetServerSocketsResponse__Output as _grpc_channelz_v1_GetServerSocketsResponse__Output } from '../../../grpc/channelz/v1/GetServerSocketsResponse'; +import type { GetServersRequest as _grpc_channelz_v1_GetServersRequest, GetServersRequest__Output as _grpc_channelz_v1_GetServersRequest__Output } from '../../../grpc/channelz/v1/GetServersRequest'; +import type { GetServersResponse as _grpc_channelz_v1_GetServersResponse, GetServersResponse__Output as _grpc_channelz_v1_GetServersResponse__Output } from '../../../grpc/channelz/v1/GetServersResponse'; +import type { GetSocketRequest as _grpc_channelz_v1_GetSocketRequest, GetSocketRequest__Output as _grpc_channelz_v1_GetSocketRequest__Output } from '../../../grpc/channelz/v1/GetSocketRequest'; +import type { GetSocketResponse as _grpc_channelz_v1_GetSocketResponse, GetSocketResponse__Output as _grpc_channelz_v1_GetSocketResponse__Output } from '../../../grpc/channelz/v1/GetSocketResponse'; +import type { GetSubchannelRequest as _grpc_channelz_v1_GetSubchannelRequest, GetSubchannelRequest__Output as _grpc_channelz_v1_GetSubchannelRequest__Output } from '../../../grpc/channelz/v1/GetSubchannelRequest'; +import type { GetSubchannelResponse as _grpc_channelz_v1_GetSubchannelResponse, 
GetSubchannelResponse__Output as _grpc_channelz_v1_GetSubchannelResponse__Output } from '../../../grpc/channelz/v1/GetSubchannelResponse'; +import type { GetTopChannelsRequest as _grpc_channelz_v1_GetTopChannelsRequest, GetTopChannelsRequest__Output as _grpc_channelz_v1_GetTopChannelsRequest__Output } from '../../../grpc/channelz/v1/GetTopChannelsRequest'; +import type { GetTopChannelsResponse as _grpc_channelz_v1_GetTopChannelsResponse, GetTopChannelsResponse__Output as _grpc_channelz_v1_GetTopChannelsResponse__Output } from '../../../grpc/channelz/v1/GetTopChannelsResponse'; + +/** + * Channelz is a service exposed by gRPC servers that provides detailed debug + * information. + */ +export interface ChannelzClient extends grpc.Client { + /** + * Returns a single Channel, or else a NOT_FOUND code. + */ + GetChannel(argument: _grpc_channelz_v1_GetChannelRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_channelz_v1_GetChannelResponse__Output>): grpc.ClientUnaryCall; + GetChannel(argument: _grpc_channelz_v1_GetChannelRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_channelz_v1_GetChannelResponse__Output>): grpc.ClientUnaryCall; + GetChannel(argument: _grpc_channelz_v1_GetChannelRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_channelz_v1_GetChannelResponse__Output>): grpc.ClientUnaryCall; + GetChannel(argument: _grpc_channelz_v1_GetChannelRequest, callback: grpc.requestCallback<_grpc_channelz_v1_GetChannelResponse__Output>): grpc.ClientUnaryCall; + + /** + * Returns a single Server, or else a NOT_FOUND code. + */ + GetServer(argument: _grpc_channelz_v1_GetServerRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_channelz_v1_GetServerResponse__Output>): grpc.ClientUnaryCall; + GetServer(argument: _grpc_channelz_v1_GetServerRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_channelz_v1_GetServerResponse__Output>): grpc.ClientUnaryCall; + GetServer(argument: _grpc_channelz_v1_GetServerRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_channelz_v1_GetServerResponse__Output>): grpc.ClientUnaryCall; + GetServer(argument: _grpc_channelz_v1_GetServerRequest, callback: grpc.requestCallback<_grpc_channelz_v1_GetServerResponse__Output>): grpc.ClientUnaryCall; + /** + * Returns a single Server, or else a NOT_FOUND code. + */ + getServer(argument: _grpc_channelz_v1_GetServerRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_channelz_v1_GetServerResponse__Output>): grpc.ClientUnaryCall; + getServer(argument: _grpc_channelz_v1_GetServerRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_channelz_v1_GetServerResponse__Output>): grpc.ClientUnaryCall; + getServer(argument: _grpc_channelz_v1_GetServerRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_channelz_v1_GetServerResponse__Output>): grpc.ClientUnaryCall; + getServer(argument: _grpc_channelz_v1_GetServerRequest, callback: grpc.requestCallback<_grpc_channelz_v1_GetServerResponse__Output>): grpc.ClientUnaryCall; + + /** + * Gets all server sockets that exist in the process. 
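+   * 
+   * For illustration, a first page of a server's sockets could be requested
+   * roughly as follows (a minimal sketch; `client` is assumed to be a
+   * ChannelzClient created with @grpc/proto-loader, and the ids are made up):
+   * 
+   *     client.GetServerSockets(
+   *       { server_id: 1, start_socket_id: 0, max_results: 100 },
+   *       (error, response) => {
+   *         if (error || !response) {
+   *           console.error(error);
+   *           return;
+   *         }
+   *         for (const ref of response.socket_ref) {
+   *           console.log(ref.socket_id, ref.name);
+   *         }
+   *         // If response.end is false, request the next page with
+   *         // start_socket_id set to the highest socket_id seen plus 1.
+   *       }
+   *     );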
+ */ + GetServerSockets(argument: _grpc_channelz_v1_GetServerSocketsRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_channelz_v1_GetServerSocketsResponse__Output>): grpc.ClientUnaryCall; + GetServerSockets(argument: _grpc_channelz_v1_GetServerSocketsRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_channelz_v1_GetServerSocketsResponse__Output>): grpc.ClientUnaryCall; + GetServerSockets(argument: _grpc_channelz_v1_GetServerSocketsRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_channelz_v1_GetServerSocketsResponse__Output>): grpc.ClientUnaryCall; + GetServerSockets(argument: _grpc_channelz_v1_GetServerSocketsRequest, callback: grpc.requestCallback<_grpc_channelz_v1_GetServerSocketsResponse__Output>): grpc.ClientUnaryCall; + /** + * Gets all server sockets that exist in the process. + */ + getServerSockets(argument: _grpc_channelz_v1_GetServerSocketsRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_channelz_v1_GetServerSocketsResponse__Output>): grpc.ClientUnaryCall; + getServerSockets(argument: _grpc_channelz_v1_GetServerSocketsRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_channelz_v1_GetServerSocketsResponse__Output>): grpc.ClientUnaryCall; + getServerSockets(argument: _grpc_channelz_v1_GetServerSocketsRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_channelz_v1_GetServerSocketsResponse__Output>): grpc.ClientUnaryCall; + getServerSockets(argument: _grpc_channelz_v1_GetServerSocketsRequest, callback: grpc.requestCallback<_grpc_channelz_v1_GetServerSocketsResponse__Output>): grpc.ClientUnaryCall; + + /** + * Gets all servers that exist in the process. + */ + GetServers(argument: _grpc_channelz_v1_GetServersRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_channelz_v1_GetServersResponse__Output>): grpc.ClientUnaryCall; + GetServers(argument: _grpc_channelz_v1_GetServersRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_channelz_v1_GetServersResponse__Output>): grpc.ClientUnaryCall; + GetServers(argument: _grpc_channelz_v1_GetServersRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_channelz_v1_GetServersResponse__Output>): grpc.ClientUnaryCall; + GetServers(argument: _grpc_channelz_v1_GetServersRequest, callback: grpc.requestCallback<_grpc_channelz_v1_GetServersResponse__Output>): grpc.ClientUnaryCall; + /** + * Gets all servers that exist in the process. + */ + getServers(argument: _grpc_channelz_v1_GetServersRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_channelz_v1_GetServersResponse__Output>): grpc.ClientUnaryCall; + getServers(argument: _grpc_channelz_v1_GetServersRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_channelz_v1_GetServersResponse__Output>): grpc.ClientUnaryCall; + getServers(argument: _grpc_channelz_v1_GetServersRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_channelz_v1_GetServersResponse__Output>): grpc.ClientUnaryCall; + getServers(argument: _grpc_channelz_v1_GetServersRequest, callback: grpc.requestCallback<_grpc_channelz_v1_GetServersResponse__Output>): grpc.ClientUnaryCall; + + /** + * Returns a single Socket or else a NOT_FOUND code. 
+ */ + GetSocket(argument: _grpc_channelz_v1_GetSocketRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_channelz_v1_GetSocketResponse__Output>): grpc.ClientUnaryCall; + GetSocket(argument: _grpc_channelz_v1_GetSocketRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_channelz_v1_GetSocketResponse__Output>): grpc.ClientUnaryCall; + GetSocket(argument: _grpc_channelz_v1_GetSocketRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_channelz_v1_GetSocketResponse__Output>): grpc.ClientUnaryCall; + GetSocket(argument: _grpc_channelz_v1_GetSocketRequest, callback: grpc.requestCallback<_grpc_channelz_v1_GetSocketResponse__Output>): grpc.ClientUnaryCall; + /** + * Returns a single Socket or else a NOT_FOUND code. + */ + getSocket(argument: _grpc_channelz_v1_GetSocketRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_channelz_v1_GetSocketResponse__Output>): grpc.ClientUnaryCall; + getSocket(argument: _grpc_channelz_v1_GetSocketRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_channelz_v1_GetSocketResponse__Output>): grpc.ClientUnaryCall; + getSocket(argument: _grpc_channelz_v1_GetSocketRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_channelz_v1_GetSocketResponse__Output>): grpc.ClientUnaryCall; + getSocket(argument: _grpc_channelz_v1_GetSocketRequest, callback: grpc.requestCallback<_grpc_channelz_v1_GetSocketResponse__Output>): grpc.ClientUnaryCall; + + /** + * Returns a single Subchannel, or else a NOT_FOUND code. + */ + GetSubchannel(argument: _grpc_channelz_v1_GetSubchannelRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_channelz_v1_GetSubchannelResponse__Output>): grpc.ClientUnaryCall; + GetSubchannel(argument: _grpc_channelz_v1_GetSubchannelRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_channelz_v1_GetSubchannelResponse__Output>): grpc.ClientUnaryCall; + GetSubchannel(argument: _grpc_channelz_v1_GetSubchannelRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_channelz_v1_GetSubchannelResponse__Output>): grpc.ClientUnaryCall; + GetSubchannel(argument: _grpc_channelz_v1_GetSubchannelRequest, callback: grpc.requestCallback<_grpc_channelz_v1_GetSubchannelResponse__Output>): grpc.ClientUnaryCall; + /** + * Returns a single Subchannel, or else a NOT_FOUND code. + */ + getSubchannel(argument: _grpc_channelz_v1_GetSubchannelRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_channelz_v1_GetSubchannelResponse__Output>): grpc.ClientUnaryCall; + getSubchannel(argument: _grpc_channelz_v1_GetSubchannelRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_channelz_v1_GetSubchannelResponse__Output>): grpc.ClientUnaryCall; + getSubchannel(argument: _grpc_channelz_v1_GetSubchannelRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_channelz_v1_GetSubchannelResponse__Output>): grpc.ClientUnaryCall; + getSubchannel(argument: _grpc_channelz_v1_GetSubchannelRequest, callback: grpc.requestCallback<_grpc_channelz_v1_GetSubchannelResponse__Output>): grpc.ClientUnaryCall; + + /** + * Gets all root channels (i.e. channels the application has directly + * created). This does not include subchannels nor non-top level channels. 
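+   * 
+   * As a rough usage sketch (assuming `client` is a ChannelzClient created
+   * with @grpc/proto-loader; the helper name is hypothetical), all pages can
+   * be walked by advancing start_channel_id past the highest id seen:
+   * 
+   *     function listAllTopChannels(client: ChannelzClient, startId = 0): void {
+   *       client.GetTopChannels({ start_channel_id: startId }, (error, response) => {
+   *         if (error || !response) {
+   *           console.error(error);
+   *           return;
+   *         }
+   *         for (const channel of response.channel) {
+   *           console.log(channel.ref?.channel_id, channel.ref?.name);
+   *         }
+   *         if (!response.end && response.channel.length > 0) {
+   *           const maxId = Math.max(
+   *             ...response.channel.map(c => Number(c.ref?.channel_id ?? '0'))
+   *           );
+   *           listAllTopChannels(client, maxId + 1);
+   *         }
+   *       });
+   *     }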
+ */ + GetTopChannels(argument: _grpc_channelz_v1_GetTopChannelsRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_channelz_v1_GetTopChannelsResponse__Output>): grpc.ClientUnaryCall; + GetTopChannels(argument: _grpc_channelz_v1_GetTopChannelsRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_channelz_v1_GetTopChannelsResponse__Output>): grpc.ClientUnaryCall; + GetTopChannels(argument: _grpc_channelz_v1_GetTopChannelsRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_channelz_v1_GetTopChannelsResponse__Output>): grpc.ClientUnaryCall; + GetTopChannels(argument: _grpc_channelz_v1_GetTopChannelsRequest, callback: grpc.requestCallback<_grpc_channelz_v1_GetTopChannelsResponse__Output>): grpc.ClientUnaryCall; + /** + * Gets all root channels (i.e. channels the application has directly + * created). This does not include subchannels nor non-top level channels. + */ + getTopChannels(argument: _grpc_channelz_v1_GetTopChannelsRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_channelz_v1_GetTopChannelsResponse__Output>): grpc.ClientUnaryCall; + getTopChannels(argument: _grpc_channelz_v1_GetTopChannelsRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<_grpc_channelz_v1_GetTopChannelsResponse__Output>): grpc.ClientUnaryCall; + getTopChannels(argument: _grpc_channelz_v1_GetTopChannelsRequest, options: grpc.CallOptions, callback: grpc.requestCallback<_grpc_channelz_v1_GetTopChannelsResponse__Output>): grpc.ClientUnaryCall; + getTopChannels(argument: _grpc_channelz_v1_GetTopChannelsRequest, callback: grpc.requestCallback<_grpc_channelz_v1_GetTopChannelsResponse__Output>): grpc.ClientUnaryCall; + +} + +/** + * Channelz is a service exposed by gRPC servers that provides detailed debug + * information. + */ +export interface ChannelzHandlers extends grpc.UntypedServiceImplementation { + /** + * Returns a single Channel, or else a NOT_FOUND code. + */ + GetChannel: grpc.handleUnaryCall<_grpc_channelz_v1_GetChannelRequest__Output, _grpc_channelz_v1_GetChannelResponse>; + + /** + * Returns a single Server, or else a NOT_FOUND code. + */ + GetServer: grpc.handleUnaryCall<_grpc_channelz_v1_GetServerRequest__Output, _grpc_channelz_v1_GetServerResponse>; + + /** + * Gets all server sockets that exist in the process. + */ + GetServerSockets: grpc.handleUnaryCall<_grpc_channelz_v1_GetServerSocketsRequest__Output, _grpc_channelz_v1_GetServerSocketsResponse>; + + /** + * Gets all servers that exist in the process. + */ + GetServers: grpc.handleUnaryCall<_grpc_channelz_v1_GetServersRequest__Output, _grpc_channelz_v1_GetServersResponse>; + + /** + * Returns a single Socket or else a NOT_FOUND code. + */ + GetSocket: grpc.handleUnaryCall<_grpc_channelz_v1_GetSocketRequest__Output, _grpc_channelz_v1_GetSocketResponse>; + + /** + * Returns a single Subchannel, or else a NOT_FOUND code. + */ + GetSubchannel: grpc.handleUnaryCall<_grpc_channelz_v1_GetSubchannelRequest__Output, _grpc_channelz_v1_GetSubchannelResponse>; + + /** + * Gets all root channels (i.e. channels the application has directly + * created). This does not include subchannels nor non-top level channels. 
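+   * 
+   * A handler conforming to this entry might look like the following sketch
+   * (the empty response is a placeholder for illustration, not the real
+   * implementation backing this service):
+   * 
+   *     const getTopChannels: grpc.handleUnaryCall<
+   *       _grpc_channelz_v1_GetTopChannelsRequest__Output,
+   *       _grpc_channelz_v1_GetTopChannelsResponse
+   *     > = (call, callback) => {
+   *       // call.request.start_channel_id and max_results select the page.
+   *       callback(null, { channel: [], end: true });
+   *     };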
+ */ + GetTopChannels: grpc.handleUnaryCall<_grpc_channelz_v1_GetTopChannelsRequest__Output, _grpc_channelz_v1_GetTopChannelsResponse>; + +} + +export interface ChannelzDefinition extends grpc.ServiceDefinition { + GetChannel: MethodDefinition<_grpc_channelz_v1_GetChannelRequest, _grpc_channelz_v1_GetChannelResponse, _grpc_channelz_v1_GetChannelRequest__Output, _grpc_channelz_v1_GetChannelResponse__Output> + GetServer: MethodDefinition<_grpc_channelz_v1_GetServerRequest, _grpc_channelz_v1_GetServerResponse, _grpc_channelz_v1_GetServerRequest__Output, _grpc_channelz_v1_GetServerResponse__Output> + GetServerSockets: MethodDefinition<_grpc_channelz_v1_GetServerSocketsRequest, _grpc_channelz_v1_GetServerSocketsResponse, _grpc_channelz_v1_GetServerSocketsRequest__Output, _grpc_channelz_v1_GetServerSocketsResponse__Output> + GetServers: MethodDefinition<_grpc_channelz_v1_GetServersRequest, _grpc_channelz_v1_GetServersResponse, _grpc_channelz_v1_GetServersRequest__Output, _grpc_channelz_v1_GetServersResponse__Output> + GetSocket: MethodDefinition<_grpc_channelz_v1_GetSocketRequest, _grpc_channelz_v1_GetSocketResponse, _grpc_channelz_v1_GetSocketRequest__Output, _grpc_channelz_v1_GetSocketResponse__Output> + GetSubchannel: MethodDefinition<_grpc_channelz_v1_GetSubchannelRequest, _grpc_channelz_v1_GetSubchannelResponse, _grpc_channelz_v1_GetSubchannelRequest__Output, _grpc_channelz_v1_GetSubchannelResponse__Output> + GetTopChannels: MethodDefinition<_grpc_channelz_v1_GetTopChannelsRequest, _grpc_channelz_v1_GetTopChannelsResponse, _grpc_channelz_v1_GetTopChannelsRequest__Output, _grpc_channelz_v1_GetTopChannelsResponse__Output> +} diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/GetChannelRequest.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/GetChannelRequest.ts new file mode 100644 index 000000000..437e2d60a --- /dev/null +++ b/packages/grpc-js/src/generated/grpc/channelz/v1/GetChannelRequest.ts @@ -0,0 +1,17 @@ +// Original file: proto/channelz.proto + +import type { Long } from '@grpc/proto-loader'; + +export interface GetChannelRequest { + /** + * channel_id is the identifier of the specific channel to get. + */ + 'channel_id'?: (number | string | Long); +} + +export interface GetChannelRequest__Output { + /** + * channel_id is the identifier of the specific channel to get. + */ + 'channel_id': (string); +} diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/GetChannelResponse.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/GetChannelResponse.ts new file mode 100644 index 000000000..2e967a458 --- /dev/null +++ b/packages/grpc-js/src/generated/grpc/channelz/v1/GetChannelResponse.ts @@ -0,0 +1,19 @@ +// Original file: proto/channelz.proto + +import type { Channel as _grpc_channelz_v1_Channel, Channel__Output as _grpc_channelz_v1_Channel__Output } from '../../../grpc/channelz/v1/Channel'; + +export interface GetChannelResponse { + /** + * The Channel that corresponds to the requested channel_id. This field + * should be set. + */ + 'channel'?: (_grpc_channelz_v1_Channel | null); +} + +export interface GetChannelResponse__Output { + /** + * The Channel that corresponds to the requested channel_id. This field + * should be set. 
+ */ + 'channel': (_grpc_channelz_v1_Channel__Output | null); +} diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/GetServerRequest.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/GetServerRequest.ts new file mode 100644 index 000000000..f5d4a298f --- /dev/null +++ b/packages/grpc-js/src/generated/grpc/channelz/v1/GetServerRequest.ts @@ -0,0 +1,17 @@ +// Original file: proto/channelz.proto + +import type { Long } from '@grpc/proto-loader'; + +export interface GetServerRequest { + /** + * server_id is the identifier of the specific server to get. + */ + 'server_id'?: (number | string | Long); +} + +export interface GetServerRequest__Output { + /** + * server_id is the identifier of the specific server to get. + */ + 'server_id': (string); +} diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/GetServerResponse.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/GetServerResponse.ts new file mode 100644 index 000000000..fe0078209 --- /dev/null +++ b/packages/grpc-js/src/generated/grpc/channelz/v1/GetServerResponse.ts @@ -0,0 +1,19 @@ +// Original file: proto/channelz.proto + +import type { Server as _grpc_channelz_v1_Server, Server__Output as _grpc_channelz_v1_Server__Output } from '../../../grpc/channelz/v1/Server'; + +export interface GetServerResponse { + /** + * The Server that corresponds to the requested server_id. This field + * should be set. + */ + 'server'?: (_grpc_channelz_v1_Server | null); +} + +export interface GetServerResponse__Output { + /** + * The Server that corresponds to the requested server_id. This field + * should be set. + */ + 'server': (_grpc_channelz_v1_Server__Output | null); +} diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/GetServerSocketsRequest.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/GetServerSocketsRequest.ts new file mode 100644 index 000000000..c33056edc --- /dev/null +++ b/packages/grpc-js/src/generated/grpc/channelz/v1/GetServerSocketsRequest.ts @@ -0,0 +1,39 @@ +// Original file: proto/channelz.proto + +import type { Long } from '@grpc/proto-loader'; + +export interface GetServerSocketsRequest { + 'server_id'?: (number | string | Long); + /** + * start_socket_id indicates that only sockets at or above this id should be + * included in the results. + * To request the first page, this must be set to 0. To request + * subsequent pages, the client generates this value by adding 1 to + * the highest seen result ID. + */ + 'start_socket_id'?: (number | string | Long); + /** + * If non-zero, the server will return a page of results containing + * at most this many items. If zero, the server will choose a + * reasonable page size. Must never be negative. + */ + 'max_results'?: (number | string | Long); +} + +export interface GetServerSocketsRequest__Output { + 'server_id': (string); + /** + * start_socket_id indicates that only sockets at or above this id should be + * included in the results. + * To request the first page, this must be set to 0. To request + * subsequent pages, the client generates this value by adding 1 to + * the highest seen result ID. + */ + 'start_socket_id': (string); + /** + * If non-zero, the server will return a page of results containing + * at most this many items. If zero, the server will choose a + * reasonable page size. Must never be negative. 
+ */ + 'max_results': (string); +} diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/GetServerSocketsResponse.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/GetServerSocketsResponse.ts new file mode 100644 index 000000000..112f277e3 --- /dev/null +++ b/packages/grpc-js/src/generated/grpc/channelz/v1/GetServerSocketsResponse.ts @@ -0,0 +1,33 @@ +// Original file: proto/channelz.proto + +import type { SocketRef as _grpc_channelz_v1_SocketRef, SocketRef__Output as _grpc_channelz_v1_SocketRef__Output } from '../../../grpc/channelz/v1/SocketRef'; + +export interface GetServerSocketsResponse { + /** + * list of socket refs that the connection detail service knows about. Sorted in + * ascending socket_id order. + * Must contain at least 1 result, otherwise 'end' must be true. + */ + 'socket_ref'?: (_grpc_channelz_v1_SocketRef)[]; + /** + * If set, indicates that the list of sockets is the final list. Requesting + * more sockets will only return more if they are created after this RPC + * completes. + */ + 'end'?: (boolean); +} + +export interface GetServerSocketsResponse__Output { + /** + * list of socket refs that the connection detail service knows about. Sorted in + * ascending socket_id order. + * Must contain at least 1 result, otherwise 'end' must be true. + */ + 'socket_ref': (_grpc_channelz_v1_SocketRef__Output)[]; + /** + * If set, indicates that the list of sockets is the final list. Requesting + * more sockets will only return more if they are created after this RPC + * completes. + */ + 'end': (boolean); +} diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/GetServersRequest.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/GetServersRequest.ts new file mode 100644 index 000000000..2defea62d --- /dev/null +++ b/packages/grpc-js/src/generated/grpc/channelz/v1/GetServersRequest.ts @@ -0,0 +1,37 @@ +// Original file: proto/channelz.proto + +import type { Long } from '@grpc/proto-loader'; + +export interface GetServersRequest { + /** + * start_server_id indicates that only servers at or above this id should be + * included in the results. + * To request the first page, this must be set to 0. To request + * subsequent pages, the client generates this value by adding 1 to + * the highest seen result ID. + */ + 'start_server_id'?: (number | string | Long); + /** + * If non-zero, the server will return a page of results containing + * at most this many items. If zero, the server will choose a + * reasonable page size. Must never be negative. + */ + 'max_results'?: (number | string | Long); +} + +export interface GetServersRequest__Output { + /** + * start_server_id indicates that only servers at or above this id should be + * included in the results. + * To request the first page, this must be set to 0. To request + * subsequent pages, the client generates this value by adding 1 to + * the highest seen result ID. + */ + 'start_server_id': (string); + /** + * If non-zero, the server will return a page of results containing + * at most this many items. If zero, the server will choose a + * reasonable page size. Must never be negative. 
+   */
+  'max_results': (string);
+}
diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/GetServersResponse.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/GetServersResponse.ts
new file mode 100644
index 000000000..b07893b8c
--- /dev/null
+++ b/packages/grpc-js/src/generated/grpc/channelz/v1/GetServersResponse.ts
@@ -0,0 +1,33 @@
+// Original file: proto/channelz.proto
+
+import type { Server as _grpc_channelz_v1_Server, Server__Output as _grpc_channelz_v1_Server__Output } from '../../../grpc/channelz/v1/Server';
+
+export interface GetServersResponse {
+  /**
+   * list of servers that the connection detail service knows about. Sorted in
+   * ascending server_id order.
+   * Must contain at least 1 result, otherwise 'end' must be true.
+   */
+  'server'?: (_grpc_channelz_v1_Server)[];
+  /**
+   * If set, indicates that the list of servers is the final list. Requesting
+   * more servers will only return more if they are created after this RPC
+   * completes.
+   */
+  'end'?: (boolean);
+}
+
+export interface GetServersResponse__Output {
+  /**
+   * list of servers that the connection detail service knows about. Sorted in
+   * ascending server_id order.
+   * Must contain at least 1 result, otherwise 'end' must be true.
+   */
+  'server': (_grpc_channelz_v1_Server__Output)[];
+  /**
+   * If set, indicates that the list of servers is the final list. Requesting
+   * more servers will only return more if they are created after this RPC
+   * completes.
+   */
+  'end': (boolean);
+}
diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/GetSocketRequest.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/GetSocketRequest.ts
new file mode 100644
index 000000000..b3dc1608e
--- /dev/null
+++ b/packages/grpc-js/src/generated/grpc/channelz/v1/GetSocketRequest.ts
@@ -0,0 +1,29 @@
+// Original file: proto/channelz.proto
+
+import type { Long } from '@grpc/proto-loader';
+
+export interface GetSocketRequest {
+  /**
+   * socket_id is the identifier of the specific socket to get.
+   */
+  'socket_id'?: (number | string | Long);
+  /**
+   * If true, the response will contain only high level information
+   * that is inexpensive to obtain. Fields that may be omitted are
+   * documented.
+   */
+  'summary'?: (boolean);
+}
+
+export interface GetSocketRequest__Output {
+  /**
+   * socket_id is the identifier of the specific socket to get.
+   */
+  'socket_id': (string);
+  /**
+   * If true, the response will contain only high level information
+   * that is inexpensive to obtain. Fields that may be omitted are
+   * documented.
+   */
+  'summary': (boolean);
+}
diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/GetSocketResponse.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/GetSocketResponse.ts
new file mode 100644
index 000000000..b6304b7f0
--- /dev/null
+++ b/packages/grpc-js/src/generated/grpc/channelz/v1/GetSocketResponse.ts
@@ -0,0 +1,19 @@
+// Original file: proto/channelz.proto
+
+import type { Socket as _grpc_channelz_v1_Socket, Socket__Output as _grpc_channelz_v1_Socket__Output } from '../../../grpc/channelz/v1/Socket';
+
+export interface GetSocketResponse {
+  /**
+   * The Socket that corresponds to the requested socket_id. This field
+   * should be set.
+   */
+  'socket'?: (_grpc_channelz_v1_Socket | null);
+}
+
+export interface GetSocketResponse__Output {
+  /**
+   * The Socket that corresponds to the requested socket_id. This field
+   * should be set.
+ */ + 'socket': (_grpc_channelz_v1_Socket__Output | null); +} diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/GetSubchannelRequest.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/GetSubchannelRequest.ts new file mode 100644 index 000000000..f481a81d2 --- /dev/null +++ b/packages/grpc-js/src/generated/grpc/channelz/v1/GetSubchannelRequest.ts @@ -0,0 +1,17 @@ +// Original file: proto/channelz.proto + +import type { Long } from '@grpc/proto-loader'; + +export interface GetSubchannelRequest { + /** + * subchannel_id is the identifier of the specific subchannel to get. + */ + 'subchannel_id'?: (number | string | Long); +} + +export interface GetSubchannelRequest__Output { + /** + * subchannel_id is the identifier of the specific subchannel to get. + */ + 'subchannel_id': (string); +} diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/GetSubchannelResponse.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/GetSubchannelResponse.ts new file mode 100644 index 000000000..57d2bf2dc --- /dev/null +++ b/packages/grpc-js/src/generated/grpc/channelz/v1/GetSubchannelResponse.ts @@ -0,0 +1,19 @@ +// Original file: proto/channelz.proto + +import type { Subchannel as _grpc_channelz_v1_Subchannel, Subchannel__Output as _grpc_channelz_v1_Subchannel__Output } from '../../../grpc/channelz/v1/Subchannel'; + +export interface GetSubchannelResponse { + /** + * The Subchannel that corresponds to the requested subchannel_id. This + * field should be set. + */ + 'subchannel'?: (_grpc_channelz_v1_Subchannel | null); +} + +export interface GetSubchannelResponse__Output { + /** + * The Subchannel that corresponds to the requested subchannel_id. This + * field should be set. + */ + 'subchannel': (_grpc_channelz_v1_Subchannel__Output | null); +} diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/GetTopChannelsRequest.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/GetTopChannelsRequest.ts new file mode 100644 index 000000000..a122d7a85 --- /dev/null +++ b/packages/grpc-js/src/generated/grpc/channelz/v1/GetTopChannelsRequest.ts @@ -0,0 +1,37 @@ +// Original file: proto/channelz.proto + +import type { Long } from '@grpc/proto-loader'; + +export interface GetTopChannelsRequest { + /** + * start_channel_id indicates that only channels at or above this id should be + * included in the results. + * To request the first page, this should be set to 0. To request + * subsequent pages, the client generates this value by adding 1 to + * the highest seen result ID. + */ + 'start_channel_id'?: (number | string | Long); + /** + * If non-zero, the server will return a page of results containing + * at most this many items. If zero, the server will choose a + * reasonable page size. Must never be negative. + */ + 'max_results'?: (number | string | Long); +} + +export interface GetTopChannelsRequest__Output { + /** + * start_channel_id indicates that only channels at or above this id should be + * included in the results. + * To request the first page, this should be set to 0. To request + * subsequent pages, the client generates this value by adding 1 to + * the highest seen result ID. + */ + 'start_channel_id': (string); + /** + * If non-zero, the server will return a page of results containing + * at most this many items. If zero, the server will choose a + * reasonable page size. Must never be negative. 
+ */ + 'max_results': (string); +} diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/GetTopChannelsResponse.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/GetTopChannelsResponse.ts new file mode 100644 index 000000000..d96e63673 --- /dev/null +++ b/packages/grpc-js/src/generated/grpc/channelz/v1/GetTopChannelsResponse.ts @@ -0,0 +1,33 @@ +// Original file: proto/channelz.proto + +import type { Channel as _grpc_channelz_v1_Channel, Channel__Output as _grpc_channelz_v1_Channel__Output } from '../../../grpc/channelz/v1/Channel'; + +export interface GetTopChannelsResponse { + /** + * list of channels that the connection detail service knows about. Sorted in + * ascending channel_id order. + * Must contain at least 1 result, otherwise 'end' must be true. + */ + 'channel'?: (_grpc_channelz_v1_Channel)[]; + /** + * If set, indicates that the list of channels is the final list. Requesting + * more channels can only return more if they are created after this RPC + * completes. + */ + 'end'?: (boolean); +} + +export interface GetTopChannelsResponse__Output { + /** + * list of channels that the connection detail service knows about. Sorted in + * ascending channel_id order. + * Must contain at least 1 result, otherwise 'end' must be true. + */ + 'channel': (_grpc_channelz_v1_Channel__Output)[]; + /** + * If set, indicates that the list of channels is the final list. Requesting + * more channels can only return more if they are created after this RPC + * completes. + */ + 'end': (boolean); +} diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/Security.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/Security.ts new file mode 100644 index 000000000..e555d698e --- /dev/null +++ b/packages/grpc-js/src/generated/grpc/channelz/v1/Security.ts @@ -0,0 +1,87 @@ +// Original file: proto/channelz.proto + +import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../google/protobuf/Any'; + +export interface _grpc_channelz_v1_Security_OtherSecurity { + /** + * The human readable version of the value. + */ + 'name'?: (string); + /** + * The actual security details message. + */ + 'value'?: (_google_protobuf_Any | null); +} + +export interface _grpc_channelz_v1_Security_OtherSecurity__Output { + /** + * The human readable version of the value. + */ + 'name': (string); + /** + * The actual security details message. + */ + 'value': (_google_protobuf_Any__Output | null); +} + +export interface _grpc_channelz_v1_Security_Tls { + /** + * The cipher suite name in the RFC 4346 format: + * https://tools.ietf.org/html/rfc4346#appendix-C + */ + 'standard_name'?: (string); + /** + * Some other way to describe the cipher suite if + * the RFC 4346 name is not available. + */ + 'other_name'?: (string); + /** + * the certificate used by this endpoint. + */ + 'local_certificate'?: (Buffer | Uint8Array | string); + /** + * the certificate used by the remote endpoint. + */ + 'remote_certificate'?: (Buffer | Uint8Array | string); + 'cipher_suite'?: "standard_name"|"other_name"; +} + +export interface _grpc_channelz_v1_Security_Tls__Output { + /** + * The cipher suite name in the RFC 4346 format: + * https://tools.ietf.org/html/rfc4346#appendix-C + */ + 'standard_name'?: (string); + /** + * Some other way to describe the cipher suite if + * the RFC 4346 name is not available. + */ + 'other_name'?: (string); + /** + * the certificate used by this endpoint. + */ + 'local_certificate': (Buffer); + /** + * the certificate used by the remote endpoint. 
+ */ + 'remote_certificate': (Buffer); + 'cipher_suite': "standard_name"|"other_name"; +} + +/** + * Security represents details about how secure the socket is. + */ +export interface Security { + 'tls'?: (_grpc_channelz_v1_Security_Tls | null); + 'other'?: (_grpc_channelz_v1_Security_OtherSecurity | null); + 'model'?: "tls"|"other"; +} + +/** + * Security represents details about how secure the socket is. + */ +export interface Security__Output { + 'tls'?: (_grpc_channelz_v1_Security_Tls__Output | null); + 'other'?: (_grpc_channelz_v1_Security_OtherSecurity__Output | null); + 'model': "tls"|"other"; +} diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/Server.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/Server.ts new file mode 100644 index 000000000..958343358 --- /dev/null +++ b/packages/grpc-js/src/generated/grpc/channelz/v1/Server.ts @@ -0,0 +1,45 @@ +// Original file: proto/channelz.proto + +import type { ServerRef as _grpc_channelz_v1_ServerRef, ServerRef__Output as _grpc_channelz_v1_ServerRef__Output } from '../../../grpc/channelz/v1/ServerRef'; +import type { ServerData as _grpc_channelz_v1_ServerData, ServerData__Output as _grpc_channelz_v1_ServerData__Output } from '../../../grpc/channelz/v1/ServerData'; +import type { SocketRef as _grpc_channelz_v1_SocketRef, SocketRef__Output as _grpc_channelz_v1_SocketRef__Output } from '../../../grpc/channelz/v1/SocketRef'; + +/** + * Server represents a single server. There may be multiple servers in a single + * program. + */ +export interface Server { + /** + * The identifier for a Server. This should be set. + */ + 'ref'?: (_grpc_channelz_v1_ServerRef | null); + /** + * The associated data of the Server. + */ + 'data'?: (_grpc_channelz_v1_ServerData | null); + /** + * The sockets that the server is listening on. There are no ordering + * guarantees. This may be absent. + */ + 'listen_socket'?: (_grpc_channelz_v1_SocketRef)[]; +} + +/** + * Server represents a single server. There may be multiple servers in a single + * program. + */ +export interface Server__Output { + /** + * The identifier for a Server. This should be set. + */ + 'ref': (_grpc_channelz_v1_ServerRef__Output | null); + /** + * The associated data of the Server. + */ + 'data': (_grpc_channelz_v1_ServerData__Output | null); + /** + * The sockets that the server is listening on. There are no ordering + * guarantees. This may be absent. + */ + 'listen_socket': (_grpc_channelz_v1_SocketRef__Output)[]; +} diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/ServerData.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/ServerData.ts new file mode 100644 index 000000000..ce48e36f5 --- /dev/null +++ b/packages/grpc-js/src/generated/grpc/channelz/v1/ServerData.ts @@ -0,0 +1,57 @@ +// Original file: proto/channelz.proto + +import type { ChannelTrace as _grpc_channelz_v1_ChannelTrace, ChannelTrace__Output as _grpc_channelz_v1_ChannelTrace__Output } from '../../../grpc/channelz/v1/ChannelTrace'; +import type { Timestamp as _google_protobuf_Timestamp, Timestamp__Output as _google_protobuf_Timestamp__Output } from '../../../google/protobuf/Timestamp'; +import type { Long } from '@grpc/proto-loader'; + +/** + * ServerData is data for a specific Server. + */ +export interface ServerData { + /** + * A trace of recent events on the server. May be absent. 
+   */
+  'trace'?: (_grpc_channelz_v1_ChannelTrace | null);
+  /**
+   * The number of incoming calls started on the server
+   */
+  'calls_started'?: (number | string | Long);
+  /**
+   * The number of incoming calls that have completed with an OK status
+   */
+  'calls_succeeded'?: (number | string | Long);
+  /**
+   * The number of incoming calls that have completed with a non-OK status
+   */
+  'calls_failed'?: (number | string | Long);
+  /**
+   * The last time a call was started on the server.
+   */
+  'last_call_started_timestamp'?: (_google_protobuf_Timestamp | null);
+}
+
+/**
+ * ServerData is data for a specific Server.
+ */
+export interface ServerData__Output {
+  /**
+   * A trace of recent events on the server. May be absent.
+   */
+  'trace': (_grpc_channelz_v1_ChannelTrace__Output | null);
+  /**
+   * The number of incoming calls started on the server
+   */
+  'calls_started': (string);
+  /**
+   * The number of incoming calls that have completed with an OK status
+   */
+  'calls_succeeded': (string);
+  /**
+   * The number of incoming calls that have completed with a non-OK status
+   */
+  'calls_failed': (string);
+  /**
+   * The last time a call was started on the server.
+   */
+  'last_call_started_timestamp': (_google_protobuf_Timestamp__Output | null);
+}
diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/ServerRef.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/ServerRef.ts
new file mode 100644
index 000000000..389183bdc
--- /dev/null
+++ b/packages/grpc-js/src/generated/grpc/channelz/v1/ServerRef.ts
@@ -0,0 +1,31 @@
+// Original file: proto/channelz.proto
+
+import type { Long } from '@grpc/proto-loader';
+
+/**
+ * ServerRef is a reference to a Server.
+ */
+export interface ServerRef {
+  /**
+   * A globally unique identifier for this server. Must be a positive number.
+   */
+  'server_id'?: (number | string | Long);
+  /**
+   * An optional name associated with the server.
+   */
+  'name'?: (string);
+}
+
+/**
+ * ServerRef is a reference to a Server.
+ */
+export interface ServerRef__Output {
+  /**
+   * A globally unique identifier for this server. Must be a positive number.
+   */
+  'server_id': (string);
+  /**
+   * An optional name associated with the server.
+   */
+  'name': (string);
+}
diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/Socket.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/Socket.ts
new file mode 100644
index 000000000..5829afe98
--- /dev/null
+++ b/packages/grpc-js/src/generated/grpc/channelz/v1/Socket.ts
@@ -0,0 +1,70 @@
+// Original file: proto/channelz.proto
+
+import type { SocketRef as _grpc_channelz_v1_SocketRef, SocketRef__Output as _grpc_channelz_v1_SocketRef__Output } from '../../../grpc/channelz/v1/SocketRef';
+import type { SocketData as _grpc_channelz_v1_SocketData, SocketData__Output as _grpc_channelz_v1_SocketData__Output } from '../../../grpc/channelz/v1/SocketData';
+import type { Address as _grpc_channelz_v1_Address, Address__Output as _grpc_channelz_v1_Address__Output } from '../../../grpc/channelz/v1/Address';
+import type { Security as _grpc_channelz_v1_Security, Security__Output as _grpc_channelz_v1_Security__Output } from '../../../grpc/channelz/v1/Security';
+
+/**
+ * Information about an actual connection. Pronounced "sock-ay".
+ */
+export interface Socket {
+  /**
+   * The identifier for the Socket.
+   */
+  'ref'?: (_grpc_channelz_v1_SocketRef | null);
+  /**
+   * Data specific to this Socket.
+   */
+  'data'?: (_grpc_channelz_v1_SocketData | null);
+  /**
+   * The locally bound address.
+ */ + 'local'?: (_grpc_channelz_v1_Address | null); + /** + * The remote bound address. May be absent. + */ + 'remote'?: (_grpc_channelz_v1_Address | null); + /** + * Security details for this socket. May be absent if not available, or + * there is no security on the socket. + */ + 'security'?: (_grpc_channelz_v1_Security | null); + /** + * Optional, represents the name of the remote endpoint, if different than + * the original target name. + */ + 'remote_name'?: (string); +} + +/** + * Information about an actual connection. Pronounced "sock-ay". + */ +export interface Socket__Output { + /** + * The identifier for the Socket. + */ + 'ref': (_grpc_channelz_v1_SocketRef__Output | null); + /** + * Data specific to this Socket. + */ + 'data': (_grpc_channelz_v1_SocketData__Output | null); + /** + * The locally bound address. + */ + 'local': (_grpc_channelz_v1_Address__Output | null); + /** + * The remote bound address. May be absent. + */ + 'remote': (_grpc_channelz_v1_Address__Output | null); + /** + * Security details for this socket. May be absent if not available, or + * there is no security on the socket. + */ + 'security': (_grpc_channelz_v1_Security__Output | null); + /** + * Optional, represents the name of the remote endpoint, if different than + * the original target name. + */ + 'remote_name': (string); +} diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/SocketData.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/SocketData.ts new file mode 100644 index 000000000..c62d4d10c --- /dev/null +++ b/packages/grpc-js/src/generated/grpc/channelz/v1/SocketData.ts @@ -0,0 +1,150 @@ +// Original file: proto/channelz.proto + +import type { Timestamp as _google_protobuf_Timestamp, Timestamp__Output as _google_protobuf_Timestamp__Output } from '../../../google/protobuf/Timestamp'; +import type { Int64Value as _google_protobuf_Int64Value, Int64Value__Output as _google_protobuf_Int64Value__Output } from '../../../google/protobuf/Int64Value'; +import type { SocketOption as _grpc_channelz_v1_SocketOption, SocketOption__Output as _grpc_channelz_v1_SocketOption__Output } from '../../../grpc/channelz/v1/SocketOption'; +import type { Long } from '@grpc/proto-loader'; + +/** + * SocketData is data associated for a specific Socket. The fields present + * are specific to the implementation, so there may be minor differences in + * the semantics. (e.g. flow control windows) + */ +export interface SocketData { + /** + * The number of streams that have been started. + */ + 'streams_started'?: (number | string | Long); + /** + * The number of streams that have ended successfully: + * On client side, received frame with eos bit set; + * On server side, sent frame with eos bit set. + */ + 'streams_succeeded'?: (number | string | Long); + /** + * The number of streams that have ended unsuccessfully: + * On client side, ended without receiving frame with eos bit set; + * On server side, ended without sending frame with eos bit set. + */ + 'streams_failed'?: (number | string | Long); + /** + * The number of grpc messages successfully sent on this socket. + */ + 'messages_sent'?: (number | string | Long); + /** + * The number of grpc messages received on this socket. + */ + 'messages_received'?: (number | string | Long); + /** + * The number of keep alives sent. This is typically implemented with HTTP/2 + * ping messages. + */ + 'keep_alives_sent'?: (number | string | Long); + /** + * The last time a stream was created by this endpoint. Usually unset for + * servers. 
+ */ + 'last_local_stream_created_timestamp'?: (_google_protobuf_Timestamp | null); + /** + * The last time a stream was created by the remote endpoint. Usually unset + * for clients. + */ + 'last_remote_stream_created_timestamp'?: (_google_protobuf_Timestamp | null); + /** + * The last time a message was sent by this endpoint. + */ + 'last_message_sent_timestamp'?: (_google_protobuf_Timestamp | null); + /** + * The last time a message was received by this endpoint. + */ + 'last_message_received_timestamp'?: (_google_protobuf_Timestamp | null); + /** + * The amount of window, granted to the local endpoint by the remote endpoint. + * This may be slightly out of date due to network latency. This does NOT + * include stream level or TCP level flow control info. + */ + 'local_flow_control_window'?: (_google_protobuf_Int64Value | null); + /** + * The amount of window, granted to the remote endpoint by the local endpoint. + * This may be slightly out of date due to network latency. This does NOT + * include stream level or TCP level flow control info. + */ + 'remote_flow_control_window'?: (_google_protobuf_Int64Value | null); + /** + * Socket options set on this socket. May be absent if 'summary' is set + * on GetSocketRequest. + */ + 'option'?: (_grpc_channelz_v1_SocketOption)[]; +} + +/** + * SocketData is data associated for a specific Socket. The fields present + * are specific to the implementation, so there may be minor differences in + * the semantics. (e.g. flow control windows) + */ +export interface SocketData__Output { + /** + * The number of streams that have been started. + */ + 'streams_started': (string); + /** + * The number of streams that have ended successfully: + * On client side, received frame with eos bit set; + * On server side, sent frame with eos bit set. + */ + 'streams_succeeded': (string); + /** + * The number of streams that have ended unsuccessfully: + * On client side, ended without receiving frame with eos bit set; + * On server side, ended without sending frame with eos bit set. + */ + 'streams_failed': (string); + /** + * The number of grpc messages successfully sent on this socket. + */ + 'messages_sent': (string); + /** + * The number of grpc messages received on this socket. + */ + 'messages_received': (string); + /** + * The number of keep alives sent. This is typically implemented with HTTP/2 + * ping messages. + */ + 'keep_alives_sent': (string); + /** + * The last time a stream was created by this endpoint. Usually unset for + * servers. + */ + 'last_local_stream_created_timestamp': (_google_protobuf_Timestamp__Output | null); + /** + * The last time a stream was created by the remote endpoint. Usually unset + * for clients. + */ + 'last_remote_stream_created_timestamp': (_google_protobuf_Timestamp__Output | null); + /** + * The last time a message was sent by this endpoint. + */ + 'last_message_sent_timestamp': (_google_protobuf_Timestamp__Output | null); + /** + * The last time a message was received by this endpoint. + */ + 'last_message_received_timestamp': (_google_protobuf_Timestamp__Output | null); + /** + * The amount of window, granted to the local endpoint by the remote endpoint. + * This may be slightly out of date due to network latency. This does NOT + * include stream level or TCP level flow control info. + */ + 'local_flow_control_window': (_google_protobuf_Int64Value__Output | null); + /** + * The amount of window, granted to the remote endpoint by the local endpoint. + * This may be slightly out of date due to network latency. 
This does NOT + * include stream level or TCP level flow control info. + */ + 'remote_flow_control_window': (_google_protobuf_Int64Value__Output | null); + /** + * Socket options set on this socket. May be absent if 'summary' is set + * on GetSocketRequest. + */ + 'option': (_grpc_channelz_v1_SocketOption__Output)[]; +} diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/SocketOption.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/SocketOption.ts new file mode 100644 index 000000000..115b36aae --- /dev/null +++ b/packages/grpc-js/src/generated/grpc/channelz/v1/SocketOption.ts @@ -0,0 +1,47 @@ +// Original file: proto/channelz.proto + +import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../../google/protobuf/Any'; + +/** + * SocketOption represents socket options for a socket. Specifically, these + * are the options returned by getsockopt(). + */ +export interface SocketOption { + /** + * The full name of the socket option. Typically this will be the upper case + * name, such as "SO_REUSEPORT". + */ + 'name'?: (string); + /** + * The human readable value of this socket option. At least one of value or + * additional will be set. + */ + 'value'?: (string); + /** + * Additional data associated with the socket option. At least one of value + * or additional will be set. + */ + 'additional'?: (_google_protobuf_Any | null); +} + +/** + * SocketOption represents socket options for a socket. Specifically, these + * are the options returned by getsockopt(). + */ +export interface SocketOption__Output { + /** + * The full name of the socket option. Typically this will be the upper case + * name, such as "SO_REUSEPORT". + */ + 'name': (string); + /** + * The human readable value of this socket option. At least one of value or + * additional will be set. + */ + 'value': (string); + /** + * Additional data associated with the socket option. At least one of value + * or additional will be set. + */ + 'additional': (_google_protobuf_Any__Output | null); +} diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/SocketOptionLinger.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/SocketOptionLinger.ts new file mode 100644 index 000000000..d83fa3238 --- /dev/null +++ b/packages/grpc-js/src/generated/grpc/channelz/v1/SocketOptionLinger.ts @@ -0,0 +1,33 @@ +// Original file: proto/channelz.proto + +import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../google/protobuf/Duration'; + +/** + * For use with SocketOption's additional field. This is primarily used for + * SO_LINGER. + */ +export interface SocketOptionLinger { + /** + * active maps to `struct linger.l_onoff` + */ + 'active'?: (boolean); + /** + * duration maps to `struct linger.l_linger` + */ + 'duration'?: (_google_protobuf_Duration | null); +} + +/** + * For use with SocketOption's additional field. This is primarily used for + * SO_LINGER. 
+ */ +export interface SocketOptionLinger__Output { + /** + * active maps to `struct linger.l_onoff` + */ + 'active': (boolean); + /** + * duration maps to `struct linger.l_linger` + */ + 'duration': (_google_protobuf_Duration__Output | null); +} diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/SocketOptionTcpInfo.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/SocketOptionTcpInfo.ts new file mode 100644 index 000000000..2f8affe80 --- /dev/null +++ b/packages/grpc-js/src/generated/grpc/channelz/v1/SocketOptionTcpInfo.ts @@ -0,0 +1,74 @@ +// Original file: proto/channelz.proto + + +/** + * For use with SocketOption's additional field. Tcp info for + * SOL_TCP and TCP_INFO. + */ +export interface SocketOptionTcpInfo { + 'tcpi_state'?: (number); + 'tcpi_ca_state'?: (number); + 'tcpi_retransmits'?: (number); + 'tcpi_probes'?: (number); + 'tcpi_backoff'?: (number); + 'tcpi_options'?: (number); + 'tcpi_snd_wscale'?: (number); + 'tcpi_rcv_wscale'?: (number); + 'tcpi_rto'?: (number); + 'tcpi_ato'?: (number); + 'tcpi_snd_mss'?: (number); + 'tcpi_rcv_mss'?: (number); + 'tcpi_unacked'?: (number); + 'tcpi_sacked'?: (number); + 'tcpi_lost'?: (number); + 'tcpi_retrans'?: (number); + 'tcpi_fackets'?: (number); + 'tcpi_last_data_sent'?: (number); + 'tcpi_last_ack_sent'?: (number); + 'tcpi_last_data_recv'?: (number); + 'tcpi_last_ack_recv'?: (number); + 'tcpi_pmtu'?: (number); + 'tcpi_rcv_ssthresh'?: (number); + 'tcpi_rtt'?: (number); + 'tcpi_rttvar'?: (number); + 'tcpi_snd_ssthresh'?: (number); + 'tcpi_snd_cwnd'?: (number); + 'tcpi_advmss'?: (number); + 'tcpi_reordering'?: (number); +} + +/** + * For use with SocketOption's additional field. Tcp info for + * SOL_TCP and TCP_INFO. + */ +export interface SocketOptionTcpInfo__Output { + 'tcpi_state': (number); + 'tcpi_ca_state': (number); + 'tcpi_retransmits': (number); + 'tcpi_probes': (number); + 'tcpi_backoff': (number); + 'tcpi_options': (number); + 'tcpi_snd_wscale': (number); + 'tcpi_rcv_wscale': (number); + 'tcpi_rto': (number); + 'tcpi_ato': (number); + 'tcpi_snd_mss': (number); + 'tcpi_rcv_mss': (number); + 'tcpi_unacked': (number); + 'tcpi_sacked': (number); + 'tcpi_lost': (number); + 'tcpi_retrans': (number); + 'tcpi_fackets': (number); + 'tcpi_last_data_sent': (number); + 'tcpi_last_ack_sent': (number); + 'tcpi_last_data_recv': (number); + 'tcpi_last_ack_recv': (number); + 'tcpi_pmtu': (number); + 'tcpi_rcv_ssthresh': (number); + 'tcpi_rtt': (number); + 'tcpi_rttvar': (number); + 'tcpi_snd_ssthresh': (number); + 'tcpi_snd_cwnd': (number); + 'tcpi_advmss': (number); + 'tcpi_reordering': (number); +} diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/SocketOptionTimeout.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/SocketOptionTimeout.ts new file mode 100644 index 000000000..185839b2c --- /dev/null +++ b/packages/grpc-js/src/generated/grpc/channelz/v1/SocketOptionTimeout.ts @@ -0,0 +1,19 @@ +// Original file: proto/channelz.proto + +import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../google/protobuf/Duration'; + +/** + * For use with SocketOption's additional field. This is primarily used for + * SO_RCVTIMEO and SO_SNDTIMEO + */ +export interface SocketOptionTimeout { + 'duration'?: (_google_protobuf_Duration | null); +} + +/** + * For use with SocketOption's additional field. 
This is primarily used for + * SO_RCVTIMEO and SO_SNDTIMEO + */ +export interface SocketOptionTimeout__Output { + 'duration': (_google_protobuf_Duration__Output | null); +} diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/SocketRef.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/SocketRef.ts new file mode 100644 index 000000000..52fdb2bd3 --- /dev/null +++ b/packages/grpc-js/src/generated/grpc/channelz/v1/SocketRef.ts @@ -0,0 +1,31 @@ +// Original file: proto/channelz.proto + +import type { Long } from '@grpc/proto-loader'; + +/** + * SocketRef is a reference to a Socket. + */ +export interface SocketRef { + /** + * The globally unique id for this socket. Must be a positive number. + */ + 'socket_id'?: (number | string | Long); + /** + * An optional name associated with the socket. + */ + 'name'?: (string); +} + +/** + * SocketRef is a reference to a Socket. + */ +export interface SocketRef__Output { + /** + * The globally unique id for this socket. Must be a positive number. + */ + 'socket_id': (string); + /** + * An optional name associated with the socket. + */ + 'name': (string); +} diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/Subchannel.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/Subchannel.ts new file mode 100644 index 000000000..7122fac83 --- /dev/null +++ b/packages/grpc-js/src/generated/grpc/channelz/v1/Subchannel.ts @@ -0,0 +1,70 @@ +// Original file: proto/channelz.proto + +import type { SubchannelRef as _grpc_channelz_v1_SubchannelRef, SubchannelRef__Output as _grpc_channelz_v1_SubchannelRef__Output } from '../../../grpc/channelz/v1/SubchannelRef'; +import type { ChannelData as _grpc_channelz_v1_ChannelData, ChannelData__Output as _grpc_channelz_v1_ChannelData__Output } from '../../../grpc/channelz/v1/ChannelData'; +import type { ChannelRef as _grpc_channelz_v1_ChannelRef, ChannelRef__Output as _grpc_channelz_v1_ChannelRef__Output } from '../../../grpc/channelz/v1/ChannelRef'; +import type { SocketRef as _grpc_channelz_v1_SocketRef, SocketRef__Output as _grpc_channelz_v1_SocketRef__Output } from '../../../grpc/channelz/v1/SocketRef'; + +/** + * Subchannel is a logical grouping of channels, subchannels, and sockets. + * A subchannel is load balanced over by it's ancestor + */ +export interface Subchannel { + /** + * The identifier for this channel. + */ + 'ref'?: (_grpc_channelz_v1_SubchannelRef | null); + /** + * Data specific to this channel. + */ + 'data'?: (_grpc_channelz_v1_ChannelData | null); + /** + * There are no ordering guarantees on the order of channel refs. + * There may not be cycles in the ref graph. + * A channel ref may be present in more than one channel or subchannel. + */ + 'channel_ref'?: (_grpc_channelz_v1_ChannelRef)[]; + /** + * At most one of 'channel_ref+subchannel_ref' and 'socket' is set. + * There are no ordering guarantees on the order of subchannel refs. + * There may not be cycles in the ref graph. + * A sub channel ref may be present in more than one channel or subchannel. + */ + 'subchannel_ref'?: (_grpc_channelz_v1_SubchannelRef)[]; + /** + * There are no ordering guarantees on the order of sockets. + */ + 'socket_ref'?: (_grpc_channelz_v1_SocketRef)[]; +} + +/** + * Subchannel is a logical grouping of channels, subchannels, and sockets. + * A subchannel is load balanced over by it's ancestor + */ +export interface Subchannel__Output { + /** + * The identifier for this channel. + */ + 'ref': (_grpc_channelz_v1_SubchannelRef__Output | null); + /** + * Data specific to this channel. 
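/*
 * A minimal sketch of the __Output shapes defined above as a channelz client
 * would receive them: int64 ids are decoded as decimal strings, and
 * `additional` carries a packed message such as SocketOptionLinger when the
 * plain `value` string is not enough. Import paths assume the generated
 * directory layout above; the literal values are illustrative.
 */
import type { SocketRef__Output } from './generated/grpc/channelz/v1/SocketRef';
import type { SocketOption__Output } from './generated/grpc/channelz/v1/SocketOption';

const exampleSocketRef: SocketRef__Output = {
  socket_id: '42',
  name: 'tcp:127.0.0.1:50051', // illustrative name
};

const exampleLingerOption: SocketOption__Output = {
  name: 'SO_LINGER',
  value: 'l_onoff=0, l_linger=0',
  additional: null, // a google.protobuf.Any wrapping SocketOptionLinger when set
};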
+ */ + 'data': (_grpc_channelz_v1_ChannelData__Output | null); + /** + * There are no ordering guarantees on the order of channel refs. + * There may not be cycles in the ref graph. + * A channel ref may be present in more than one channel or subchannel. + */ + 'channel_ref': (_grpc_channelz_v1_ChannelRef__Output)[]; + /** + * At most one of 'channel_ref+subchannel_ref' and 'socket' is set. + * There are no ordering guarantees on the order of subchannel refs. + * There may not be cycles in the ref graph. + * A sub channel ref may be present in more than one channel or subchannel. + */ + 'subchannel_ref': (_grpc_channelz_v1_SubchannelRef__Output)[]; + /** + * There are no ordering guarantees on the order of sockets. + */ + 'socket_ref': (_grpc_channelz_v1_SocketRef__Output)[]; +} diff --git a/packages/grpc-js/src/generated/grpc/channelz/v1/SubchannelRef.ts b/packages/grpc-js/src/generated/grpc/channelz/v1/SubchannelRef.ts new file mode 100644 index 000000000..b6911c773 --- /dev/null +++ b/packages/grpc-js/src/generated/grpc/channelz/v1/SubchannelRef.ts @@ -0,0 +1,31 @@ +// Original file: proto/channelz.proto + +import type { Long } from '@grpc/proto-loader'; + +/** + * SubchannelRef is a reference to a Subchannel. + */ +export interface SubchannelRef { + /** + * The globally unique id for this subchannel. Must be a positive number. + */ + 'subchannel_id'?: (number | string | Long); + /** + * An optional name associated with the subchannel. + */ + 'name'?: (string); +} + +/** + * SubchannelRef is a reference to a Subchannel. + */ +export interface SubchannelRef__Output { + /** + * The globally unique id for this subchannel. Must be a positive number. + */ + 'subchannel_id': (string); + /** + * An optional name associated with the subchannel. + */ + 'name': (string); +} diff --git a/packages/grpc-js/src/http_proxy.ts b/packages/grpc-js/src/http_proxy.ts index 6e62eddd0..3e905c488 100644 --- a/packages/grpc-js/src/http_proxy.ts +++ b/packages/grpc-js/src/http_proxy.ts @@ -26,10 +26,11 @@ import { SubchannelAddress, isTcpSubchannelAddress, subchannelAddressToString, -} from './subchannel'; +} from './subchannel-address'; import { ChannelOptions } from './channel-options'; import { GrpcUri, parseUri, splitHostPort, uriToString } from './uri-parser'; import { URL } from 'url'; +import { DEFAULT_PORT } from './resolver-dns'; const TRACER_NAME = 'proxy'; @@ -93,7 +94,7 @@ function getProxyInfo(): ProxyInfo { port = '80'; } const result: ProxyInfo = { - address: `${hostname}:${port}` + address: `${hostname}:${port}`, }; if (userCred) { result.creds = userCred; @@ -136,6 +137,9 @@ export function mapProxyName( if ((options['grpc.enable_http_proxy'] ?? 1) === 0) { return noProxyResult; } + if (target.scheme === 'unix') { + return noProxyResult; + } const proxyInfo = getProxyInfo(); if (!proxyInfo.address) { return noProxyResult; @@ -147,7 +151,9 @@ export function mapProxyName( const serverHost = hostPort.host; for (const host of getNoProxyHostList()) { if (host === serverHost) { - trace('Not using proxy for target in no_proxy list: ' + uriToString(target)); + trace( + 'Not using proxy for target in no_proxy list: ' + uriToString(target) + ); return noProxyResult; } } @@ -184,9 +190,19 @@ export function getProxiedConnection( if (parsedTarget === null) { return Promise.resolve({}); } + const splitHostPost = splitHostPort(parsedTarget.path); + if (splitHostPost === null) { + return Promise.resolve({}); + } + const hostPort = `${splitHostPost.host}:${ + splitHostPost.port ?? 
DEFAULT_PORT + }`; const options: http.RequestOptions = { method: 'CONNECT', - path: parsedTarget.path, + path: hostPort, + }; + const headers: http.OutgoingHttpHeaders = { + Host: hostPort, }; // Connect to the subchannel address as a proxy if (isTcpSubchannelAddress(address)) { @@ -196,14 +212,13 @@ export function getProxiedConnection( options.socketPath = address.path; } if ('grpc.http_connect_creds' in channelOptions) { - options.headers = { - 'Proxy-Authorization': - 'Basic ' + - Buffer.from( - channelOptions['grpc.http_connect_creds'] as string - ).toString('base64'), - }; + headers['Proxy-Authorization'] = + 'Basic ' + + Buffer.from(channelOptions['grpc.http_connect_creds'] as string).toString( + 'base64' + ); } + options.headers = headers; const proxyAddressString = subchannelAddressToString(address); trace('Using proxy ' + proxyAddressString + ' to connect to ' + options.path); return new Promise((resolve, reject) => { @@ -226,7 +241,7 @@ export function getProxiedConnection( const targetPath = getDefaultAuthority(parsedTarget); const hostPort = splitHostPort(targetPath); const remoteHost = hostPort?.host ?? targetPath; - + const cts = tls.connect( { host: remoteHost, @@ -245,12 +260,14 @@ export function getProxiedConnection( } ); cts.on('error', (error: Error) => { - trace('Failed to establish a TLS connection to ' + - options.path + - ' through proxy ' + - proxyAddressString + - ' with error ' + - error.message); + trace( + 'Failed to establish a TLS connection to ' + + options.path + + ' through proxy ' + + proxyAddressString + + ' with error ' + + error.message + ); reject(); }); } else { @@ -278,7 +295,7 @@ export function getProxiedConnection( reject(); } }); - request.once('error', (err) => { + request.once('error', err => { request.removeAllListeners(); log( LogVerbosity.ERROR, diff --git a/packages/grpc-js/src/index.ts b/packages/grpc-js/src/index.ts index 05288855c..c766a3718 100644 --- a/packages/grpc-js/src/index.ts +++ b/packages/grpc-js/src/index.ts @@ -23,9 +23,11 @@ import { ServiceError, } from './call'; import { CallCredentials, OAuth2Client } from './call-credentials'; -import { Deadline, StatusObject } from './call-stream'; -import { Channel, ConnectivityState, ChannelImplementation } from './channel'; -import { ChannelCredentials } from './channel-credentials'; +import { StatusObject } from './call-interface'; +import { Channel, ChannelImplementation } from './channel'; +import { CompressionAlgorithms } from './compression-algorithms'; +import { ConnectivityState } from './connectivity-state'; +import { ChannelCredentials, VerifyOptions } from './channel-credentials'; import { CallOptions, Client, @@ -44,9 +46,10 @@ import { Serialize, ServiceDefinition, } from './make-client'; -import { Metadata, MetadataValue } from './metadata'; +import { Metadata, MetadataOptions, MetadataValue } from './metadata'; import { Server, + ServerOptions, UntypedHandleCall, UntypedServiceImplementation, } from './server'; @@ -62,6 +65,7 @@ import { ServerReadableStream, ServerWritableStream, ServerDuplexStream, + ServerErrorResponse, } from './server-call'; export { OAuth2Client }; @@ -104,6 +108,7 @@ export const credentials = { // from channel-credentials.ts createInsecure: ChannelCredentials.createInsecure, createSsl: ChannelCredentials.createSsl, + createFromSecureContext: ChannelCredentials.createFromSecureContext, // from call-credentials.ts createFromMetadataGenerator: CallCredentials.createFromMetadataGenerator, @@ -113,7 +118,7 @@ export const credentials = { /**** 
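/*
 * A usage sketch for the CONNECT proxy handling above. The proxy and backend
 * addresses are placeholders; the proxy target is typically taken from the
 * grpc_proxy/https_proxy environment variables, hosts listed in no_proxy (and
 * unix: targets) bypass the proxy, and 'grpc.http_connect_creds' is sent as a
 * basic Proxy-Authorization header on the CONNECT request.
 */
import { Client, credentials } from '@grpc/grpc-js';

process.env.grpc_proxy = 'http://proxy.example.com:3128';
process.env.no_proxy = 'localhost,127.0.0.1';

const proxiedClient = new Client(
  'backend.example.com:443',
  credentials.createSsl(),
  {
    'grpc.enable_http_proxy': 1, // the default; 0 connects directly
    'grpc.http_connect_creds': 'proxyuser:proxypass',
  }
);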
Metadata ****/ -export { Metadata, MetadataValue }; +export { Metadata, MetadataOptions, MetadataValue }; /**** Constants ****/ @@ -122,6 +127,7 @@ export { Status as status, ConnectivityState as connectivityState, Propagate as propagate, + CompressionAlgorithms as compressionAlgorithms, // TODO: Other constants as well }; @@ -173,14 +179,21 @@ export { ServerReadableStream, ServerWritableStream, ServerDuplexStream, + ServerErrorResponse, ServiceDefinition, UntypedHandleCall, UntypedServiceImplementation, + VerifyOptions, }; /**** Server ****/ -export { handleBidiStreamingCall, handleServerStreamingCall, handleUnaryCall, handleClientStreamingCall }; +export { + handleBidiStreamingCall, + handleServerStreamingCall, + handleUnaryCall, + handleClientStreamingCall, +}; /* eslint-disable @typescript-eslint/no-explicit-any */ export type Call = @@ -194,13 +207,13 @@ export type Call = /* eslint-disable @typescript-eslint/no-explicit-any */ -export const loadObject = (value: any, options: any) => { +export const loadObject = (value: any, options: any): never => { throw new Error( 'Not available in this library. Use @grpc/proto-loader and loadPackageDefinition instead' ); }; -export const load = (filename: any, format: any, options: any) => { +export const load = (filename: any, format: any, options: any): never => { throw new Error( 'Not available in this library. Use @grpc/proto-loader and loadPackageDefinition instead' ); @@ -214,7 +227,7 @@ export const setLogVerbosity = (verbosity: LogVerbosity): void => { logging.setLoggerVerbosity(verbosity); }; -export { Server }; +export { Server, ServerOptions }; export { ServerCredentials }; export { KeyCertPair }; @@ -224,7 +237,7 @@ export const getClientChannel = (client: Client) => { export { StatusBuilder }; -export { Listener } from './call-stream'; +export { Listener, InterceptingListener } from './call-interface'; export { Requester, @@ -235,22 +248,58 @@ export { InterceptorProvider, InterceptingCall, InterceptorConfigurationError, + NextCall, } from './client-interceptors'; -export { GrpcObject } from './make-client'; +export { + GrpcObject, + ServiceClientConstructor, + ProtobufTypeDefinition, +} from './make-client'; export { ChannelOptions } from './channel-options'; +export { getChannelzServiceDefinition, getChannelzHandlers } from './channelz'; + +export { addAdminServicesToServer } from './admin'; + +export { + ServiceConfig, + LoadBalancingConfig, + MethodConfig, + RetryPolicy, +} from './service-config'; + +export { + ServerListener, + FullServerListener, + ServerListenerBuilder, + Responder, + FullResponder, + ResponderBuilder, + ServerInterceptingCallInterface, + ServerInterceptingCall, + ServerInterceptor, +} from './server-interceptors'; + import * as experimental from './experimental'; export { experimental }; -import * as resolver from './resolver'; -import * as load_balancer from './load-balancer'; - -const clientVersion = require('../../package.json').version; +import * as resolver_dns from './resolver-dns'; +import * as resolver_uds from './resolver-uds'; +import * as resolver_ip from './resolver-ip'; +import * as load_balancer_pick_first from './load-balancer-pick-first'; +import * as load_balancer_round_robin from './load-balancer-round-robin'; +import * as load_balancer_outlier_detection from './load-balancer-outlier-detection'; +import * as channelz from './channelz'; +import { Deadline } from './deadline'; (() => { - logging.trace(LogVerbosity.DEBUG, 'index', 'Loading @grpc/grpc-js version ' + clientVersion); - 
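/*
 * A minimal sketch of wiring up the admin/channelz service exported above; the
 * bind address is a placeholder. addAdminServicesToServer registers the
 * channelz service (and any other registered admin services) on the server,
 * backed by getChannelzHandlers().
 */
import { Server, ServerCredentials, addAdminServicesToServer } from '@grpc/grpc-js';

const adminServer = new Server();
addAdminServicesToServer(adminServer);
adminServer.bindAsync('0.0.0.0:50052', ServerCredentials.createInsecure(), err => {
  if (err) {
    throw err;
  }
  adminServer.start(); // channelz data for this process is now queryable
});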
resolver.registerAll(); - load_balancer.registerAll(); + resolver_dns.setup(); + resolver_uds.setup(); + resolver_ip.setup(); + load_balancer_pick_first.setup(); + load_balancer_round_robin.setup(); + load_balancer_outlier_detection.setup(); + channelz.setup(); })(); diff --git a/packages/grpc-js/src/internal-channel.ts b/packages/grpc-js/src/internal-channel.ts new file mode 100644 index 000000000..e0cebd469 --- /dev/null +++ b/packages/grpc-js/src/internal-channel.ts @@ -0,0 +1,843 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import { ChannelCredentials } from './channel-credentials'; +import { ChannelOptions } from './channel-options'; +import { ResolvingLoadBalancer } from './resolving-load-balancer'; +import { SubchannelPool, getSubchannelPool } from './subchannel-pool'; +import { ChannelControlHelper } from './load-balancer'; +import { UnavailablePicker, Picker, QueuePicker } from './picker'; +import { Metadata } from './metadata'; +import { Status, LogVerbosity, Propagate } from './constants'; +import { FilterStackFactory } from './filter-stack'; +import { CompressionFilterFactory } from './compression-filter'; +import { + CallConfig, + ConfigSelector, + getDefaultAuthority, + mapUriDefaultScheme, +} from './resolver'; +import { trace } from './logging'; +import { SubchannelAddress } from './subchannel-address'; +import { mapProxyName } from './http_proxy'; +import { GrpcUri, parseUri, uriToString } from './uri-parser'; +import { ServerSurfaceCall } from './server-call'; + +import { ConnectivityState } from './connectivity-state'; +import { + ChannelInfo, + ChannelRef, + ChannelzCallTracker, + ChannelzChildrenTracker, + ChannelzTrace, + registerChannelzChannel, + SubchannelRef, + unregisterChannelzRef, +} from './channelz'; +import { LoadBalancingCall } from './load-balancing-call'; +import { CallCredentials } from './call-credentials'; +import { Call, CallStreamOptions, StatusObject } from './call-interface'; +import { Deadline, deadlineToString } from './deadline'; +import { ResolvingCall } from './resolving-call'; +import { getNextCallNumber } from './call-number'; +import { restrictControlPlaneStatusCode } from './control-plane-status'; +import { + MessageBufferTracker, + RetryingCall, + RetryThrottler, +} from './retrying-call'; +import { + BaseSubchannelWrapper, + ConnectivityStateListener, + SubchannelInterface, +} from './subchannel-interface'; + +/** + * See https://nodejs.org/api/timers.html#timers_setinterval_callback_delay_args + */ +const MAX_TIMEOUT_TIME = 2147483647; + +const MIN_IDLE_TIMEOUT_MS = 1000; + +// 30 minutes +const DEFAULT_IDLE_TIMEOUT_MS = 30 * 60 * 1000; + +interface ConnectivityStateWatcher { + currentState: ConnectivityState; + timer: NodeJS.Timeout | null; + callback: (error?: Error) => void; +} + +interface NoneConfigResult { + type: 'NONE'; +} + +interface SuccessConfigResult { + type: 'SUCCESS'; + config: CallConfig; +} + +interface ErrorConfigResult { + type: 'ERROR'; + error: 
StatusObject; +} + +type GetConfigResult = + | NoneConfigResult + | SuccessConfigResult + | ErrorConfigResult; + +const RETRY_THROTTLER_MAP: Map = new Map(); + +const DEFAULT_RETRY_BUFFER_SIZE_BYTES = 1 << 24; // 16 MB +const DEFAULT_PER_RPC_RETRY_BUFFER_SIZE_BYTES = 1 << 20; // 1 MB + +class ChannelSubchannelWrapper + extends BaseSubchannelWrapper + implements SubchannelInterface +{ + private refCount = 0; + private subchannelStateListener: ConnectivityStateListener; + constructor( + childSubchannel: SubchannelInterface, + private channel: InternalChannel + ) { + super(childSubchannel); + this.subchannelStateListener = ( + subchannel, + previousState, + newState, + keepaliveTime + ) => { + channel.throttleKeepalive(keepaliveTime); + }; + childSubchannel.addConnectivityStateListener(this.subchannelStateListener); + } + + ref(): void { + this.child.ref(); + this.refCount += 1; + } + + unref(): void { + this.child.unref(); + this.refCount -= 1; + if (this.refCount <= 0) { + this.child.removeConnectivityStateListener(this.subchannelStateListener); + this.channel.removeWrappedSubchannel(this); + } + } +} + +export class InternalChannel { + private readonly resolvingLoadBalancer: ResolvingLoadBalancer; + private readonly subchannelPool: SubchannelPool; + private connectivityState: ConnectivityState = ConnectivityState.IDLE; + private currentPicker: Picker = new UnavailablePicker(); + /** + * Calls queued up to get a call config. Should only be populated before the + * first time the resolver returns a result, which includes the ConfigSelector. + */ + private configSelectionQueue: ResolvingCall[] = []; + private pickQueue: LoadBalancingCall[] = []; + private connectivityStateWatchers: ConnectivityStateWatcher[] = []; + private readonly defaultAuthority: string; + private readonly filterStackFactory: FilterStackFactory; + private readonly target: GrpcUri; + /** + * This timer does not do anything on its own. Its purpose is to hold the + * event loop open while there are any pending calls for the channel that + * have not yet been assigned to specific subchannels. In other words, + * the invariant is that callRefTimer is reffed if and only if pickQueue + * is non-empty. + */ + private readonly callRefTimer: NodeJS.Timeout; + private configSelector: ConfigSelector | null = null; + /** + * This is the error from the name resolver if it failed most recently. It + * is only used to end calls that start while there is no config selector + * and the name resolver is in backoff, so it should be nulled if + * configSelector becomes set or the channel state becomes anything other + * than TRANSIENT_FAILURE. + */ + private currentResolutionError: StatusObject | null = null; + private readonly retryBufferTracker: MessageBufferTracker; + private keepaliveTime: number; + private readonly wrappedSubchannels: Set = + new Set(); + + private callCount = 0; + private idleTimer: NodeJS.Timeout | null = null; + private readonly idleTimeoutMs: number; + private lastActivityTimestamp: Date; + + // Channelz info + private readonly channelzEnabled: boolean = true; + private readonly originalTarget: string; + private readonly channelzRef: ChannelRef; + private readonly channelzTrace: ChannelzTrace; + private readonly callTracker = new ChannelzCallTracker(); + private readonly childrenTracker = new ChannelzChildrenTracker(); + + /** + * Randomly generated ID to be passed to the config selector, for use by + * ring_hash in xDS. An integer distributed approximately uniformly between + * 0 and MAX_SAFE_INTEGER. 
+ */ + private readonly randomChannelId = Math.floor( + Math.random() * Number.MAX_SAFE_INTEGER + ); + + constructor( + target: string, + private readonly credentials: ChannelCredentials, + private readonly options: ChannelOptions + ) { + if (typeof target !== 'string') { + throw new TypeError('Channel target must be a string'); + } + if (!(credentials instanceof ChannelCredentials)) { + throw new TypeError( + 'Channel credentials must be a ChannelCredentials object' + ); + } + if (options) { + if (typeof options !== 'object') { + throw new TypeError('Channel options must be an object'); + } + } + this.originalTarget = target; + const originalTargetUri = parseUri(target); + if (originalTargetUri === null) { + throw new Error(`Could not parse target name "${target}"`); + } + /* This ensures that the target has a scheme that is registered with the + * resolver */ + const defaultSchemeMapResult = mapUriDefaultScheme(originalTargetUri); + if (defaultSchemeMapResult === null) { + throw new Error( + `Could not find a default scheme for target name "${target}"` + ); + } + + this.callRefTimer = setInterval(() => {}, MAX_TIMEOUT_TIME); + this.callRefTimer.unref?.(); + + if (this.options['grpc.enable_channelz'] === 0) { + this.channelzEnabled = false; + } + + this.channelzTrace = new ChannelzTrace(); + this.channelzRef = registerChannelzChannel( + target, + () => this.getChannelzInfo(), + this.channelzEnabled + ); + if (this.channelzEnabled) { + this.channelzTrace.addTrace('CT_INFO', 'Channel created'); + } + + if (this.options['grpc.default_authority']) { + this.defaultAuthority = this.options['grpc.default_authority'] as string; + } else { + this.defaultAuthority = getDefaultAuthority(defaultSchemeMapResult); + } + const proxyMapResult = mapProxyName(defaultSchemeMapResult, options); + this.target = proxyMapResult.target; + this.options = Object.assign({}, this.options, proxyMapResult.extraOptions); + + /* The global boolean parameter to getSubchannelPool has the inverse meaning to what + * the grpc.use_local_subchannel_pool channel option means. */ + this.subchannelPool = getSubchannelPool( + (options['grpc.use_local_subchannel_pool'] ?? 0) === 0 + ); + this.retryBufferTracker = new MessageBufferTracker( + options['grpc.retry_buffer_size'] ?? DEFAULT_RETRY_BUFFER_SIZE_BYTES, + options['grpc.per_rpc_retry_buffer_size'] ?? + DEFAULT_PER_RPC_RETRY_BUFFER_SIZE_BYTES + ); + this.keepaliveTime = options['grpc.keepalive_time_ms'] ?? -1; + this.idleTimeoutMs = Math.max( + options['grpc.client_idle_timeout_ms'] ?? 
DEFAULT_IDLE_TIMEOUT_MS, + MIN_IDLE_TIMEOUT_MS + ); + const channelControlHelper: ChannelControlHelper = { + createSubchannel: ( + subchannelAddress: SubchannelAddress, + subchannelArgs: ChannelOptions + ) => { + const subchannel = this.subchannelPool.getOrCreateSubchannel( + this.target, + subchannelAddress, + Object.assign({}, this.options, subchannelArgs), + this.credentials + ); + subchannel.throttleKeepalive(this.keepaliveTime); + if (this.channelzEnabled) { + this.channelzTrace.addTrace( + 'CT_INFO', + 'Created subchannel or used existing subchannel', + subchannel.getChannelzRef() + ); + } + const wrappedSubchannel = new ChannelSubchannelWrapper( + subchannel, + this + ); + this.wrappedSubchannels.add(wrappedSubchannel); + return wrappedSubchannel; + }, + updateState: (connectivityState: ConnectivityState, picker: Picker) => { + this.currentPicker = picker; + const queueCopy = this.pickQueue.slice(); + this.pickQueue = []; + if (queueCopy.length > 0) { + this.callRefTimerUnref(); + } + for (const call of queueCopy) { + call.doPick(); + } + this.updateState(connectivityState); + }, + requestReresolution: () => { + // This should never be called. + throw new Error( + 'Resolving load balancer should never call requestReresolution' + ); + }, + addChannelzChild: (child: ChannelRef | SubchannelRef) => { + if (this.channelzEnabled) { + this.childrenTracker.refChild(child); + } + }, + removeChannelzChild: (child: ChannelRef | SubchannelRef) => { + if (this.channelzEnabled) { + this.childrenTracker.unrefChild(child); + } + }, + }; + this.resolvingLoadBalancer = new ResolvingLoadBalancer( + this.target, + channelControlHelper, + options, + (serviceConfig, configSelector) => { + if (serviceConfig.retryThrottling) { + RETRY_THROTTLER_MAP.set( + this.getTarget(), + new RetryThrottler( + serviceConfig.retryThrottling.maxTokens, + serviceConfig.retryThrottling.tokenRatio, + RETRY_THROTTLER_MAP.get(this.getTarget()) + ) + ); + } else { + RETRY_THROTTLER_MAP.delete(this.getTarget()); + } + if (this.channelzEnabled) { + this.channelzTrace.addTrace( + 'CT_INFO', + 'Address resolution succeeded' + ); + } + this.configSelector = configSelector; + this.currentResolutionError = null; + /* We process the queue asynchronously to ensure that the corresponding + * load balancer update has completed. 
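/*
 * A sketch of the client-side knobs consumed above, with illustrative values:
 * retry throttling and per-method retry policy arrive through the service
 * config, while the retry buffer sizes and idle timeout are plain channel
 * options. The target, service, and backoff values are placeholders.
 */
import { Client, credentials } from '@grpc/grpc-js';

const exampleServiceConfig = {
  methodConfig: [
    {
      name: [{ service: 'example.Echo' }],
      retryPolicy: {
        maxAttempts: 3,
        initialBackoff: '0.1s',
        maxBackoff: '1s',
        backoffMultiplier: 2,
        retryableStatusCodes: ['UNAVAILABLE'],
      },
    },
  ],
  retryThrottling: { maxTokens: 10, tokenRatio: 0.1 },
};

const retryingClient = new Client(
  'dns:///backend.example.com:443',
  credentials.createSsl(),
  {
    'grpc.service_config': JSON.stringify(exampleServiceConfig),
    'grpc.enable_retries': 1, // on by default; 0 skips RetryingCall entirely
    'grpc.retry_buffer_size': 1 << 24, // total retry buffer, default 16 MB
    'grpc.per_rpc_retry_buffer_size': 1 << 20, // per-call limit, default 1 MB
    'grpc.client_idle_timeout_ms': 5 * 60 * 1000, // enter IDLE after 5 minutes without calls
  }
);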
*/ + process.nextTick(() => { + const localQueue = this.configSelectionQueue; + this.configSelectionQueue = []; + if (localQueue.length > 0) { + this.callRefTimerUnref(); + } + for (const call of localQueue) { + call.getConfig(); + } + }); + }, + status => { + if (this.channelzEnabled) { + this.channelzTrace.addTrace( + 'CT_WARNING', + 'Address resolution failed with code ' + + status.code + + ' and details "' + + status.details + + '"' + ); + } + if (this.configSelectionQueue.length > 0) { + this.trace( + 'Name resolution failed with calls queued for config selection' + ); + } + if (this.configSelector === null) { + this.currentResolutionError = { + ...restrictControlPlaneStatusCode(status.code, status.details), + metadata: status.metadata, + }; + } + const localQueue = this.configSelectionQueue; + this.configSelectionQueue = []; + if (localQueue.length > 0) { + this.callRefTimerUnref(); + } + for (const call of localQueue) { + call.reportResolverError(status); + } + } + ); + this.filterStackFactory = new FilterStackFactory([ + new CompressionFilterFactory(this, this.options), + ]); + this.trace( + 'Channel constructed with options ' + + JSON.stringify(options, undefined, 2) + ); + const error = new Error(); + trace( + LogVerbosity.DEBUG, + 'channel_stacktrace', + '(' + + this.channelzRef.id + + ') ' + + 'Channel constructed \n' + + error.stack?.substring(error.stack.indexOf('\n') + 1) + ); + this.lastActivityTimestamp = new Date(); + } + + private getChannelzInfo(): ChannelInfo { + return { + target: this.originalTarget, + state: this.connectivityState, + trace: this.channelzTrace, + callTracker: this.callTracker, + children: this.childrenTracker.getChildLists(), + }; + } + + private trace(text: string, verbosityOverride?: LogVerbosity) { + trace( + verbosityOverride ?? 
LogVerbosity.DEBUG, + 'channel', + '(' + this.channelzRef.id + ') ' + uriToString(this.target) + ' ' + text + ); + } + + private callRefTimerRef() { + // If the hasRef function does not exist, always run the code + if (!this.callRefTimer.hasRef?.()) { + this.trace( + 'callRefTimer.ref | configSelectionQueue.length=' + + this.configSelectionQueue.length + + ' pickQueue.length=' + + this.pickQueue.length + ); + this.callRefTimer.ref?.(); + } + } + + private callRefTimerUnref() { + // If the hasRef function does not exist, always run the code + if (!this.callRefTimer.hasRef || this.callRefTimer.hasRef()) { + this.trace( + 'callRefTimer.unref | configSelectionQueue.length=' + + this.configSelectionQueue.length + + ' pickQueue.length=' + + this.pickQueue.length + ); + this.callRefTimer.unref?.(); + } + } + + private removeConnectivityStateWatcher( + watcherObject: ConnectivityStateWatcher + ) { + const watcherIndex = this.connectivityStateWatchers.findIndex( + value => value === watcherObject + ); + if (watcherIndex >= 0) { + this.connectivityStateWatchers.splice(watcherIndex, 1); + } + } + + private updateState(newState: ConnectivityState): void { + trace( + LogVerbosity.DEBUG, + 'connectivity_state', + '(' + + this.channelzRef.id + + ') ' + + uriToString(this.target) + + ' ' + + ConnectivityState[this.connectivityState] + + ' -> ' + + ConnectivityState[newState] + ); + if (this.channelzEnabled) { + this.channelzTrace.addTrace( + 'CT_INFO', + 'Connectivity state change to ' + ConnectivityState[newState] + ); + } + this.connectivityState = newState; + const watchersCopy = this.connectivityStateWatchers.slice(); + for (const watcherObject of watchersCopy) { + if (newState !== watcherObject.currentState) { + if (watcherObject.timer) { + clearTimeout(watcherObject.timer); + } + this.removeConnectivityStateWatcher(watcherObject); + watcherObject.callback(); + } + } + if (newState !== ConnectivityState.TRANSIENT_FAILURE) { + this.currentResolutionError = null; + } + } + + throttleKeepalive(newKeepaliveTime: number) { + if (newKeepaliveTime > this.keepaliveTime) { + this.keepaliveTime = newKeepaliveTime; + for (const wrappedSubchannel of this.wrappedSubchannels) { + wrappedSubchannel.throttleKeepalive(newKeepaliveTime); + } + } + } + + removeWrappedSubchannel(wrappedSubchannel: ChannelSubchannelWrapper) { + this.wrappedSubchannels.delete(wrappedSubchannel); + } + + doPick(metadata: Metadata, extraPickInfo: { [key: string]: string }) { + return this.currentPicker.pick({ + metadata: metadata, + extraPickInfo: extraPickInfo, + }); + } + + queueCallForPick(call: LoadBalancingCall) { + this.pickQueue.push(call); + this.callRefTimerRef(); + } + + getConfig(method: string, metadata: Metadata): GetConfigResult { + this.resolvingLoadBalancer.exitIdle(); + if (this.configSelector) { + return { + type: 'SUCCESS', + config: this.configSelector(method, metadata, this.randomChannelId), + }; + } else { + if (this.currentResolutionError) { + return { + type: 'ERROR', + error: this.currentResolutionError, + }; + } else { + return { + type: 'NONE', + }; + } + } + } + + queueCallForConfig(call: ResolvingCall) { + this.configSelectionQueue.push(call); + this.callRefTimerRef(); + } + + private enterIdle() { + this.resolvingLoadBalancer.destroy(); + this.updateState(ConnectivityState.IDLE); + this.currentPicker = new QueuePicker(this.resolvingLoadBalancer); + if (this.idleTimer) { + clearTimeout(this.idleTimer); + this.idleTimer = null; + } + } + + private startIdleTimeout(timeoutMs: number) { + this.idleTimer = 
setTimeout(() => { + if (this.callCount > 0) { + /* If there is currently a call, the channel will not go idle for a + * period of at least idleTimeoutMs, so check again after that time. + */ + this.startIdleTimeout(this.idleTimeoutMs); + return; + } + const now = new Date(); + const timeSinceLastActivity = + now.valueOf() - this.lastActivityTimestamp.valueOf(); + if (timeSinceLastActivity >= this.idleTimeoutMs) { + this.trace( + 'Idle timer triggered after ' + + this.idleTimeoutMs + + 'ms of inactivity' + ); + this.enterIdle(); + } else { + /* Whenever the timer fires with the latest activity being too recent, + * set the timer again for the time when the time since the last + * activity is equal to the timeout. This should result in the timer + * firing no more than once every idleTimeoutMs/2 on average. */ + this.startIdleTimeout(this.idleTimeoutMs - timeSinceLastActivity); + } + }, timeoutMs); + this.idleTimer.unref?.(); + } + + private maybeStartIdleTimer() { + if ( + this.connectivityState !== ConnectivityState.SHUTDOWN && + !this.idleTimer + ) { + this.startIdleTimeout(this.idleTimeoutMs); + } + } + + private onCallStart() { + if (this.channelzEnabled) { + this.callTracker.addCallStarted(); + } + this.callCount += 1; + } + + private onCallEnd(status: StatusObject) { + if (this.channelzEnabled) { + if (status.code === Status.OK) { + this.callTracker.addCallSucceeded(); + } else { + this.callTracker.addCallFailed(); + } + } + this.callCount -= 1; + this.lastActivityTimestamp = new Date(); + this.maybeStartIdleTimer(); + } + + createLoadBalancingCall( + callConfig: CallConfig, + method: string, + host: string, + credentials: CallCredentials, + deadline: Deadline + ): LoadBalancingCall { + const callNumber = getNextCallNumber(); + this.trace( + 'createLoadBalancingCall [' + callNumber + '] method="' + method + '"' + ); + return new LoadBalancingCall( + this, + callConfig, + method, + host, + credentials, + deadline, + callNumber + ); + } + + createRetryingCall( + callConfig: CallConfig, + method: string, + host: string, + credentials: CallCredentials, + deadline: Deadline + ): RetryingCall { + const callNumber = getNextCallNumber(); + this.trace( + 'createRetryingCall [' + callNumber + '] method="' + method + '"' + ); + return new RetryingCall( + this, + callConfig, + method, + host, + credentials, + deadline, + callNumber, + this.retryBufferTracker, + RETRY_THROTTLER_MAP.get(this.getTarget()) + ); + } + + createInnerCall( + callConfig: CallConfig, + method: string, + host: string, + credentials: CallCredentials, + deadline: Deadline + ): LoadBalancingCall | RetryingCall { + // Create a RetryingCall if retries are enabled + if (this.options['grpc.enable_retries'] === 0) { + return this.createLoadBalancingCall( + callConfig, + method, + host, + credentials, + deadline + ); + } else { + return this.createRetryingCall( + callConfig, + method, + host, + credentials, + deadline + ); + } + } + + createResolvingCall( + method: string, + deadline: Deadline, + host: string | null | undefined, + parentCall: ServerSurfaceCall | null, + propagateFlags: number | null | undefined + ): ResolvingCall { + const callNumber = getNextCallNumber(); + this.trace( + 'createResolvingCall [' + + callNumber + + '] method="' + + method + + '", deadline=' + + deadlineToString(deadline) + ); + const finalOptions: CallStreamOptions = { + deadline: deadline, + flags: propagateFlags ?? Propagate.DEFAULTS, + host: host ?? 
this.defaultAuthority, + parentCall: parentCall, + }; + + const call = new ResolvingCall( + this, + method, + finalOptions, + this.filterStackFactory.clone(), + this.credentials._getCallCredentials(), + callNumber + ); + + this.onCallStart(); + call.addStatusWatcher(status => { + this.onCallEnd(status); + }); + return call; + } + + close() { + this.resolvingLoadBalancer.destroy(); + this.updateState(ConnectivityState.SHUTDOWN); + clearInterval(this.callRefTimer); + if (this.idleTimer) { + clearTimeout(this.idleTimer); + } + if (this.channelzEnabled) { + unregisterChannelzRef(this.channelzRef); + } + + this.subchannelPool.unrefUnusedSubchannels(); + } + + getTarget() { + return uriToString(this.target); + } + + getConnectivityState(tryToConnect: boolean) { + const connectivityState = this.connectivityState; + if (tryToConnect) { + this.resolvingLoadBalancer.exitIdle(); + this.lastActivityTimestamp = new Date(); + this.maybeStartIdleTimer(); + } + return connectivityState; + } + + watchConnectivityState( + currentState: ConnectivityState, + deadline: Date | number, + callback: (error?: Error) => void + ): void { + if (this.connectivityState === ConnectivityState.SHUTDOWN) { + throw new Error('Channel has been shut down'); + } + let timer = null; + if (deadline !== Infinity) { + const deadlineDate: Date = + deadline instanceof Date ? deadline : new Date(deadline); + const now = new Date(); + if (deadline === -Infinity || deadlineDate <= now) { + process.nextTick( + callback, + new Error('Deadline passed without connectivity state change') + ); + return; + } + timer = setTimeout(() => { + this.removeConnectivityStateWatcher(watcherObject); + callback( + new Error('Deadline passed without connectivity state change') + ); + }, deadlineDate.getTime() - now.getTime()); + } + const watcherObject = { + currentState, + callback, + timer, + }; + this.connectivityStateWatchers.push(watcherObject); + } + + /** + * Get the channelz reference object for this channel. The returned value is + * garbage if channelz is disabled for this channel. 
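/*
 * Sketch of the public surface backed by getConnectivityState and
 * watchConnectivityState above; `client` is assumed to be any existing
 * grpc-js Client instance.
 */
import { Client, connectivityState } from '@grpc/grpc-js';

declare const client: Client; // any already-constructed client

const channel = client.getChannel();
const current = channel.getConnectivityState(true); // true also asks the channel to connect
channel.watchConnectivityState(current, Date.now() + 5000, err => {
  if (err) {
    console.log('Deadline passed without a connectivity state change');
  } else {
    const next = channel.getConnectivityState(false);
    console.log(`${connectivityState[current]} -> ${connectivityState[next]}`);
  }
});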
+ * @returns + */ + getChannelzRef() { + return this.channelzRef; + } + + createCall( + method: string, + deadline: Deadline, + host: string | null | undefined, + parentCall: ServerSurfaceCall | null, + propagateFlags: number | null | undefined + ): Call { + if (typeof method !== 'string') { + throw new TypeError('Channel#createCall: method must be a string'); + } + if (!(typeof deadline === 'number' || deadline instanceof Date)) { + throw new TypeError( + 'Channel#createCall: deadline must be a number or Date' + ); + } + if (this.connectivityState === ConnectivityState.SHUTDOWN) { + throw new Error('Channel has been shut down'); + } + return this.createResolvingCall( + method, + deadline, + host, + parentCall, + propagateFlags + ); + } +} diff --git a/packages/grpc-js/src/load-balancer-child-handler.ts b/packages/grpc-js/src/load-balancer-child-handler.ts index 337174c0d..352ea7b81 100644 --- a/packages/grpc-js/src/load-balancer-child-handler.ts +++ b/packages/grpc-js/src/load-balancer-child-handler.ts @@ -18,19 +18,22 @@ import { LoadBalancer, ChannelControlHelper, + TypedLoadBalancingConfig, createLoadBalancer, - LoadBalancingConfig } from './load-balancer'; -import { SubchannelAddress, Subchannel } from './subchannel'; +import { Endpoint, SubchannelAddress } from './subchannel-address'; import { ChannelOptions } from './channel-options'; -import { ConnectivityState } from './channel'; +import { ConnectivityState } from './connectivity-state'; import { Picker } from './picker'; +import type { ChannelRef, SubchannelRef } from './channelz'; +import { SubchannelInterface } from './subchannel-interface'; const TYPE_NAME = 'child_load_balancer_helper'; export class ChildLoadBalancerHandler implements LoadBalancer { private currentChild: LoadBalancer | null = null; private pendingChild: LoadBalancer | null = null; + private latestConfig: TypedLoadBalancingConfig | null = null; private ChildPolicyHelper = class { private child: LoadBalancer | null = null; @@ -38,7 +41,7 @@ export class ChildLoadBalancerHandler implements LoadBalancer { createSubchannel( subchannelAddress: SubchannelAddress, subchannelArgs: ChannelOptions - ): Subchannel { + ): SubchannelInterface { return this.parent.channelControlHelper.createSubchannel( subchannelAddress, subchannelArgs @@ -46,7 +49,7 @@ export class ChildLoadBalancerHandler implements LoadBalancer { } updateState(connectivityState: ConnectivityState, picker: Picker): void { if (this.calledByPendingChild()) { - if (connectivityState !== ConnectivityState.READY) { + if (connectivityState === ConnectivityState.CONNECTING) { return; } this.parent.currentChild?.destroy(); @@ -66,6 +69,13 @@ export class ChildLoadBalancerHandler implements LoadBalancer { setChild(newChild: LoadBalancer) { this.child = newChild; } + addChannelzChild(child: ChannelRef | SubchannelRef) { + this.parent.channelControlHelper.addChannelzChild(child); + } + removeChannelzChild(child: ChannelRef | SubchannelRef) { + this.parent.channelControlHelper.removeChannelzChild(child); + } + private calledByPendingChild(): boolean { return this.child === this.parent.pendingChild; } @@ -74,26 +84,37 @@ export class ChildLoadBalancerHandler implements LoadBalancer { } }; - constructor(private readonly channelControlHelper: ChannelControlHelper) {} + constructor( + private readonly channelControlHelper: ChannelControlHelper, + private readonly options: ChannelOptions + ) {} + + protected configUpdateRequiresNewPolicyInstance( + oldConfig: TypedLoadBalancingConfig, + newConfig: TypedLoadBalancingConfig 
+ ): boolean { + return oldConfig.getLoadBalancerName() !== newConfig.getLoadBalancerName(); + } /** * Prerequisites: lbConfig !== null and lbConfig.name is registered - * @param addressList + * @param endpointList * @param lbConfig * @param attributes */ updateAddressList( - addressList: SubchannelAddress[], - lbConfig: LoadBalancingConfig, + endpointList: Endpoint[], + lbConfig: TypedLoadBalancingConfig, attributes: { [key: string]: unknown } ): void { let childToUpdate: LoadBalancer; if ( this.currentChild === null || - this.currentChild.getTypeName() !== lbConfig.getLoadBalancerName() + this.latestConfig === null || + this.configUpdateRequiresNewPolicyInstance(this.latestConfig, lbConfig) ) { const newHelper = new this.ChildPolicyHelper(this); - const newChild = createLoadBalancer(lbConfig, newHelper)!; + const newChild = createLoadBalancer(lbConfig, newHelper, this.options)!; newHelper.setChild(newChild); if (this.currentChild === null) { this.currentChild = newChild; @@ -112,13 +133,14 @@ export class ChildLoadBalancerHandler implements LoadBalancer { childToUpdate = this.pendingChild; } } - childToUpdate.updateAddressList(addressList, lbConfig, attributes); + this.latestConfig = lbConfig; + childToUpdate.updateAddressList(endpointList, lbConfig, attributes); } exitIdle(): void { if (this.currentChild) { - this.currentChild.resetBackoff(); + this.currentChild.exitIdle(); if (this.pendingChild) { - this.pendingChild.resetBackoff(); + this.pendingChild.exitIdle(); } } } @@ -131,6 +153,10 @@ export class ChildLoadBalancerHandler implements LoadBalancer { } } destroy(): void { + /* Note: state updates are only propagated from the child balancer if that + * object is equal to this.currentChild or this.pendingChild. Since this + * function sets both of those to null, no further state updates will + * occur after this function returns. */ if (this.currentChild) { this.currentChild.destroy(); this.currentChild = null; diff --git a/packages/grpc-js/src/load-balancer-outlier-detection.ts b/packages/grpc-js/src/load-balancer-outlier-detection.ts new file mode 100644 index 000000000..8f2097f46 --- /dev/null +++ b/packages/grpc-js/src/load-balancer-outlier-detection.ts @@ -0,0 +1,835 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +import { ChannelOptions } from './channel-options'; +import { ConnectivityState } from './connectivity-state'; +import { LogVerbosity, Status } from './constants'; +import { Duration, durationToMs, isDuration, msToDuration } from './duration'; +import { + ChannelControlHelper, + createChildChannelControlHelper, + registerLoadBalancerType, +} from './experimental'; +import { + selectLbConfigFromList, + LoadBalancer, + TypedLoadBalancingConfig, +} from './load-balancer'; +import { ChildLoadBalancerHandler } from './load-balancer-child-handler'; +import { PickArgs, Picker, PickResult, PickResultType } from './picker'; +import { + Endpoint, + EndpointMap, + SubchannelAddress, + endpointToString, +} from './subchannel-address'; +import { + BaseSubchannelWrapper, + SubchannelInterface, +} from './subchannel-interface'; +import * as logging from './logging'; +import { LoadBalancingConfig } from './service-config'; + +const TRACER_NAME = 'outlier_detection'; + +function trace(text: string): void { + logging.trace(LogVerbosity.DEBUG, TRACER_NAME, text); +} + +const TYPE_NAME = 'outlier_detection'; + +const OUTLIER_DETECTION_ENABLED = + (process.env.GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION ?? 'true') === 'true'; + +export interface SuccessRateEjectionConfig { + readonly stdev_factor: number; + readonly enforcement_percentage: number; + readonly minimum_hosts: number; + readonly request_volume: number; +} + +export interface FailurePercentageEjectionConfig { + readonly threshold: number; + readonly enforcement_percentage: number; + readonly minimum_hosts: number; + readonly request_volume: number; +} + +export interface OutlierDetectionRawConfig { + interval?: Duration; + base_ejection_time?: Duration; + max_ejection_time?: Duration; + max_ejection_percent?: number; + success_rate_ejection?: Partial; + failure_percentage_ejection?: Partial; + child_policy: LoadBalancingConfig[]; +} + +const defaultSuccessRateEjectionConfig: SuccessRateEjectionConfig = { + stdev_factor: 1900, + enforcement_percentage: 100, + minimum_hosts: 5, + request_volume: 100, +}; + +const defaultFailurePercentageEjectionConfig: FailurePercentageEjectionConfig = + { + threshold: 85, + enforcement_percentage: 100, + minimum_hosts: 5, + request_volume: 50, + }; + +type TypeofValues = + | 'object' + | 'boolean' + | 'function' + | 'number' + | 'string' + | 'undefined'; + +function validateFieldType( + obj: any, + fieldName: string, + expectedType: TypeofValues, + objectName?: string +) { + if ( + fieldName in obj && + obj[fieldName] !== undefined && + typeof obj[fieldName] !== expectedType + ) { + const fullFieldName = objectName ? `${objectName}.${fieldName}` : fieldName; + throw new Error( + `outlier detection config ${fullFieldName} parse error: expected ${expectedType}, got ${typeof obj[ + fieldName + ]}` + ); + } +} + +function validatePositiveDuration( + obj: any, + fieldName: string, + objectName?: string +) { + const fullFieldName = objectName ? 
`${objectName}.${fieldName}` : fieldName; + if (fieldName in obj && obj[fieldName] !== undefined) { + if (!isDuration(obj[fieldName])) { + throw new Error( + `outlier detection config ${fullFieldName} parse error: expected Duration, got ${typeof obj[ + fieldName + ]}` + ); + } + if ( + !( + obj[fieldName].seconds >= 0 && + obj[fieldName].seconds <= 315_576_000_000 && + obj[fieldName].nanos >= 0 && + obj[fieldName].nanos <= 999_999_999 + ) + ) { + throw new Error( + `outlier detection config ${fullFieldName} parse error: values out of range for non-negative Duaration` + ); + } + } +} + +function validatePercentage(obj: any, fieldName: string, objectName?: string) { + const fullFieldName = objectName ? `${objectName}.${fieldName}` : fieldName; + validateFieldType(obj, fieldName, 'number', objectName); + if ( + fieldName in obj && + obj[fieldName] !== undefined && + !(obj[fieldName] >= 0 && obj[fieldName] <= 100) + ) { + throw new Error( + `outlier detection config ${fullFieldName} parse error: value out of range for percentage (0-100)` + ); + } +} + +export class OutlierDetectionLoadBalancingConfig + implements TypedLoadBalancingConfig +{ + private readonly intervalMs: number; + private readonly baseEjectionTimeMs: number; + private readonly maxEjectionTimeMs: number; + private readonly maxEjectionPercent: number; + private readonly successRateEjection: SuccessRateEjectionConfig | null; + private readonly failurePercentageEjection: FailurePercentageEjectionConfig | null; + + constructor( + intervalMs: number | null, + baseEjectionTimeMs: number | null, + maxEjectionTimeMs: number | null, + maxEjectionPercent: number | null, + successRateEjection: Partial | null, + failurePercentageEjection: Partial | null, + private readonly childPolicy: TypedLoadBalancingConfig + ) { + if (childPolicy.getLoadBalancerName() === 'pick_first') { + throw new Error( + 'outlier_detection LB policy cannot have a pick_first child policy' + ); + } + this.intervalMs = intervalMs ?? 10_000; + this.baseEjectionTimeMs = baseEjectionTimeMs ?? 30_000; + this.maxEjectionTimeMs = maxEjectionTimeMs ?? 300_000; + this.maxEjectionPercent = maxEjectionPercent ?? 10; + this.successRateEjection = successRateEjection + ? { ...defaultSuccessRateEjectionConfig, ...successRateEjection } + : null; + this.failurePercentageEjection = failurePercentageEjection + ? { + ...defaultFailurePercentageEjectionConfig, + ...failurePercentageEjection, + } + : null; + } + getLoadBalancerName(): string { + return TYPE_NAME; + } + toJsonObject(): object { + return { + outlier_detection: { + interval: msToDuration(this.intervalMs), + base_ejection_time: msToDuration(this.baseEjectionTimeMs), + max_ejection_time: msToDuration(this.maxEjectionTimeMs), + max_ejection_percent: this.maxEjectionPercent, + success_rate_ejection: this.successRateEjection ?? undefined, + failure_percentage_ejection: + this.failurePercentageEjection ?? 
undefined, + child_policy: [this.childPolicy.toJsonObject()], + }, + }; + } + + getIntervalMs(): number { + return this.intervalMs; + } + getBaseEjectionTimeMs(): number { + return this.baseEjectionTimeMs; + } + getMaxEjectionTimeMs(): number { + return this.maxEjectionTimeMs; + } + getMaxEjectionPercent(): number { + return this.maxEjectionPercent; + } + getSuccessRateEjectionConfig(): SuccessRateEjectionConfig | null { + return this.successRateEjection; + } + getFailurePercentageEjectionConfig(): FailurePercentageEjectionConfig | null { + return this.failurePercentageEjection; + } + getChildPolicy(): TypedLoadBalancingConfig { + return this.childPolicy; + } + + static createFromJson(obj: any): OutlierDetectionLoadBalancingConfig { + validatePositiveDuration(obj, 'interval'); + validatePositiveDuration(obj, 'base_ejection_time'); + validatePositiveDuration(obj, 'max_ejection_time'); + validatePercentage(obj, 'max_ejection_percent'); + if ( + 'success_rate_ejection' in obj && + obj.success_rate_ejection !== undefined + ) { + if (typeof obj.success_rate_ejection !== 'object') { + throw new Error( + 'outlier detection config success_rate_ejection must be an object' + ); + } + validateFieldType( + obj.success_rate_ejection, + 'stdev_factor', + 'number', + 'success_rate_ejection' + ); + validatePercentage( + obj.success_rate_ejection, + 'enforcement_percentage', + 'success_rate_ejection' + ); + validateFieldType( + obj.success_rate_ejection, + 'minimum_hosts', + 'number', + 'success_rate_ejection' + ); + validateFieldType( + obj.success_rate_ejection, + 'request_volume', + 'number', + 'success_rate_ejection' + ); + } + if ( + 'failure_percentage_ejection' in obj && + obj.failure_percentage_ejection !== undefined + ) { + if (typeof obj.failure_percentage_ejection !== 'object') { + throw new Error( + 'outlier detection config failure_percentage_ejection must be an object' + ); + } + validatePercentage( + obj.failure_percentage_ejection, + 'threshold', + 'failure_percentage_ejection' + ); + validatePercentage( + obj.failure_percentage_ejection, + 'enforcement_percentage', + 'failure_percentage_ejection' + ); + validateFieldType( + obj.failure_percentage_ejection, + 'minimum_hosts', + 'number', + 'failure_percentage_ejection' + ); + validateFieldType( + obj.failure_percentage_ejection, + 'request_volume', + 'number', + 'failure_percentage_ejection' + ); + } + + if (!('child_policy' in obj) || !Array.isArray(obj.child_policy)) { + throw new Error('outlier detection config child_policy must be an array'); + } + const childPolicy = selectLbConfigFromList(obj.child_policy); + if (!childPolicy) { + throw new Error( + 'outlier detection config child_policy: no valid recognized policy found' + ); + } + + return new OutlierDetectionLoadBalancingConfig( + obj.interval ? durationToMs(obj.interval) : null, + obj.base_ejection_time ? durationToMs(obj.base_ejection_time) : null, + obj.max_ejection_time ? durationToMs(obj.max_ejection_time) : null, + obj.max_ejection_percent ?? 
null, + obj.success_rate_ejection, + obj.failure_percentage_ejection, + childPolicy + ); + } +} + +class OutlierDetectionSubchannelWrapper + extends BaseSubchannelWrapper + implements SubchannelInterface +{ + private refCount = 0; + constructor( + childSubchannel: SubchannelInterface, + private mapEntry?: MapEntry + ) { + super(childSubchannel); + } + + ref() { + this.child.ref(); + this.refCount += 1; + } + + unref() { + this.child.unref(); + this.refCount -= 1; + if (this.refCount <= 0) { + if (this.mapEntry) { + const index = this.mapEntry.subchannelWrappers.indexOf(this); + if (index >= 0) { + this.mapEntry.subchannelWrappers.splice(index, 1); + } + } + } + } + + eject() { + this.setHealthy(false); + } + + uneject() { + this.setHealthy(true); + } + + getMapEntry(): MapEntry | undefined { + return this.mapEntry; + } + + getWrappedSubchannel(): SubchannelInterface { + return this.child; + } +} + +interface CallCountBucket { + success: number; + failure: number; +} + +function createEmptyBucket(): CallCountBucket { + return { + success: 0, + failure: 0, + }; +} + +class CallCounter { + private activeBucket: CallCountBucket = createEmptyBucket(); + private inactiveBucket: CallCountBucket = createEmptyBucket(); + addSuccess() { + this.activeBucket.success += 1; + } + addFailure() { + this.activeBucket.failure += 1; + } + switchBuckets() { + this.inactiveBucket = this.activeBucket; + this.activeBucket = createEmptyBucket(); + } + getLastSuccesses() { + return this.inactiveBucket.success; + } + getLastFailures() { + return this.inactiveBucket.failure; + } +} + +class OutlierDetectionPicker implements Picker { + constructor(private wrappedPicker: Picker, private countCalls: boolean) {} + pick(pickArgs: PickArgs): PickResult { + const wrappedPick = this.wrappedPicker.pick(pickArgs); + if (wrappedPick.pickResultType === PickResultType.COMPLETE) { + const subchannelWrapper = + wrappedPick.subchannel as OutlierDetectionSubchannelWrapper; + const mapEntry = subchannelWrapper.getMapEntry(); + if (mapEntry) { + let onCallEnded = wrappedPick.onCallEnded; + if (this.countCalls) { + onCallEnded = statusCode => { + if (statusCode === Status.OK) { + mapEntry.counter.addSuccess(); + } else { + mapEntry.counter.addFailure(); + } + wrappedPick.onCallEnded?.(statusCode); + }; + } + return { + ...wrappedPick, + subchannel: subchannelWrapper.getWrappedSubchannel(), + onCallEnded: onCallEnded, + }; + } else { + return { + ...wrappedPick, + subchannel: subchannelWrapper.getWrappedSubchannel(), + }; + } + } else { + return wrappedPick; + } + } +} + +interface MapEntry { + counter: CallCounter; + currentEjectionTimestamp: Date | null; + ejectionTimeMultiplier: number; + subchannelWrappers: OutlierDetectionSubchannelWrapper[]; +} + +export class OutlierDetectionLoadBalancer implements LoadBalancer { + private childBalancer: ChildLoadBalancerHandler; + private entryMap = new EndpointMap(); + private latestConfig: OutlierDetectionLoadBalancingConfig | null = null; + private ejectionTimer: NodeJS.Timeout; + private timerStartTime: Date | null = null; + + constructor( + channelControlHelper: ChannelControlHelper, + options: ChannelOptions + ) { + this.childBalancer = new ChildLoadBalancerHandler( + createChildChannelControlHelper(channelControlHelper, { + createSubchannel: ( + subchannelAddress: SubchannelAddress, + subchannelArgs: ChannelOptions + ) => { + const originalSubchannel = channelControlHelper.createSubchannel( + subchannelAddress, + subchannelArgs + ); + const mapEntry = + 
this.entryMap.getForSubchannelAddress(subchannelAddress); + const subchannelWrapper = new OutlierDetectionSubchannelWrapper( + originalSubchannel, + mapEntry + ); + if (mapEntry?.currentEjectionTimestamp !== null) { + // If the address is ejected, propagate that to the new subchannel wrapper + subchannelWrapper.eject(); + } + mapEntry?.subchannelWrappers.push(subchannelWrapper); + return subchannelWrapper; + }, + updateState: (connectivityState: ConnectivityState, picker: Picker) => { + if (connectivityState === ConnectivityState.READY) { + channelControlHelper.updateState( + connectivityState, + new OutlierDetectionPicker(picker, this.isCountingEnabled()) + ); + } else { + channelControlHelper.updateState(connectivityState, picker); + } + }, + }), + options + ); + this.ejectionTimer = setInterval(() => {}, 0); + clearInterval(this.ejectionTimer); + } + + private isCountingEnabled(): boolean { + return ( + this.latestConfig !== null && + (this.latestConfig.getSuccessRateEjectionConfig() !== null || + this.latestConfig.getFailurePercentageEjectionConfig() !== null) + ); + } + + private getCurrentEjectionPercent() { + let ejectionCount = 0; + for (const mapEntry of this.entryMap.values()) { + if (mapEntry.currentEjectionTimestamp !== null) { + ejectionCount += 1; + } + } + return (ejectionCount * 100) / this.entryMap.size; + } + + private runSuccessRateCheck(ejectionTimestamp: Date) { + if (!this.latestConfig) { + return; + } + const successRateConfig = this.latestConfig.getSuccessRateEjectionConfig(); + if (!successRateConfig) { + return; + } + trace('Running success rate check'); + // Step 1 + const targetRequestVolume = successRateConfig.request_volume; + let addresesWithTargetVolume = 0; + const successRates: number[] = []; + for (const [endpoint, mapEntry] of this.entryMap.entries()) { + const successes = mapEntry.counter.getLastSuccesses(); + const failures = mapEntry.counter.getLastFailures(); + trace( + 'Stats for ' + + endpointToString(endpoint) + + ': successes=' + + successes + + ' failures=' + + failures + + ' targetRequestVolume=' + + targetRequestVolume + ); + if (successes + failures >= targetRequestVolume) { + addresesWithTargetVolume += 1; + successRates.push(successes / (successes + failures)); + } + } + trace( + 'Found ' + + addresesWithTargetVolume + + ' success rate candidates; currentEjectionPercent=' + + this.getCurrentEjectionPercent() + + ' successRates=[' + + successRates + + ']' + ); + if (addresesWithTargetVolume < successRateConfig.minimum_hosts) { + return; + } + + // Step 2 + const successRateMean = + successRates.reduce((a, b) => a + b) / successRates.length; + let successRateDeviationSum = 0; + for (const rate of successRates) { + const deviation = rate - successRateMean; + successRateDeviationSum += deviation * deviation; + } + const successRateVariance = successRateDeviationSum / successRates.length; + const successRateStdev = Math.sqrt(successRateVariance); + const ejectionThreshold = + successRateMean - + successRateStdev * (successRateConfig.stdev_factor / 1000); + trace( + 'stdev=' + successRateStdev + ' ejectionThreshold=' + ejectionThreshold + ); + + // Step 3 + for (const [address, mapEntry] of this.entryMap.entries()) { + // Step 3.i + if ( + this.getCurrentEjectionPercent() >= + this.latestConfig.getMaxEjectionPercent() + ) { + break; + } + // Step 3.ii + const successes = mapEntry.counter.getLastSuccesses(); + const failures = mapEntry.counter.getLastFailures(); + if (successes + failures < targetRequestVolume) { + continue; + } + // Step 3.iii + 
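/*
 * A worked example (illustrative numbers) of the threshold computed in
 * runSuccessRateCheck: five hosts satisfy the default minimum_hosts of 5, and
 * the host with a 60% success rate falls below the threshold, making it an
 * ejection candidate subject to enforcement_percentage and
 * max_ejection_percent.
 */
const exampleRates = [0.99, 0.99, 0.98, 0.97, 0.6];
const exampleMean = exampleRates.reduce((a, b) => a + b) / exampleRates.length; // 0.906
const exampleVariance =
  exampleRates.reduce((acc, r) => acc + (r - exampleMean) * (r - exampleMean), 0) /
  exampleRates.length; // ≈ 0.0235
const exampleStdev = Math.sqrt(exampleVariance); // ≈ 0.153
const exampleThreshold = exampleMean - exampleStdev * (1900 / 1000); // ≈ 0.615 with the default stdev_factor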
const successRate = successes / (successes + failures); + trace('Checking candidate ' + address + ' successRate=' + successRate); + if (successRate < ejectionThreshold) { + const randomNumber = Math.random() * 100; + trace( + 'Candidate ' + + address + + ' randomNumber=' + + randomNumber + + ' enforcement_percentage=' + + successRateConfig.enforcement_percentage + ); + if (randomNumber < successRateConfig.enforcement_percentage) { + trace('Ejecting candidate ' + address); + this.eject(mapEntry, ejectionTimestamp); + } + } + } + } + + private runFailurePercentageCheck(ejectionTimestamp: Date) { + if (!this.latestConfig) { + return; + } + const failurePercentageConfig = + this.latestConfig.getFailurePercentageEjectionConfig(); + if (!failurePercentageConfig) { + return; + } + trace( + 'Running failure percentage check. threshold=' + + failurePercentageConfig.threshold + + ' request volume threshold=' + + failurePercentageConfig.request_volume + ); + // Step 1 + let addressesWithTargetVolume = 0; + for (const mapEntry of this.entryMap.values()) { + const successes = mapEntry.counter.getLastSuccesses(); + const failures = mapEntry.counter.getLastFailures(); + if (successes + failures >= failurePercentageConfig.request_volume) { + addressesWithTargetVolume += 1; + } + } + if (addressesWithTargetVolume < failurePercentageConfig.minimum_hosts) { + return; + } + + // Step 2 + for (const [address, mapEntry] of this.entryMap.entries()) { + // Step 2.i + if ( + this.getCurrentEjectionPercent() >= + this.latestConfig.getMaxEjectionPercent() + ) { + break; + } + // Step 2.ii + const successes = mapEntry.counter.getLastSuccesses(); + const failures = mapEntry.counter.getLastFailures(); + trace('Candidate successes=' + successes + ' failures=' + failures); + if (successes + failures < failurePercentageConfig.request_volume) { + continue; + } + // Step 2.iii + const failurePercentage = (failures * 100) / (failures + successes); + if (failurePercentage > failurePercentageConfig.threshold) { + const randomNumber = Math.random() * 100; + trace( + 'Candidate ' + + address + + ' randomNumber=' + + randomNumber + + ' enforcement_percentage=' + + failurePercentageConfig.enforcement_percentage + ); + if (randomNumber < failurePercentageConfig.enforcement_percentage) { + trace('Ejecting candidate ' + address); + this.eject(mapEntry, ejectionTimestamp); + } + } + } + } + + private eject(mapEntry: MapEntry, ejectionTimestamp: Date) { + mapEntry.currentEjectionTimestamp = new Date(); + mapEntry.ejectionTimeMultiplier += 1; + for (const subchannelWrapper of mapEntry.subchannelWrappers) { + subchannelWrapper.eject(); + } + } + + private uneject(mapEntry: MapEntry) { + mapEntry.currentEjectionTimestamp = null; + for (const subchannelWrapper of mapEntry.subchannelWrappers) { + subchannelWrapper.uneject(); + } + } + + private switchAllBuckets() { + for (const mapEntry of this.entryMap.values()) { + mapEntry.counter.switchBuckets(); + } + } + + private startTimer(delayMs: number) { + this.ejectionTimer = setTimeout(() => this.runChecks(), delayMs); + this.ejectionTimer.unref?.(); + } + + private runChecks() { + const ejectionTimestamp = new Date(); + trace('Ejection timer running'); + + this.switchAllBuckets(); + + if (!this.latestConfig) { + return; + } + this.timerStartTime = ejectionTimestamp; + this.startTimer(this.latestConfig.getIntervalMs()); + + this.runSuccessRateCheck(ejectionTimestamp); + this.runFailurePercentageCheck(ejectionTimestamp); + + for (const [address, mapEntry] of this.entryMap.entries()) { + if 
(mapEntry.currentEjectionTimestamp === null) { + if (mapEntry.ejectionTimeMultiplier > 0) { + mapEntry.ejectionTimeMultiplier -= 1; + } + } else { + const baseEjectionTimeMs = this.latestConfig.getBaseEjectionTimeMs(); + const maxEjectionTimeMs = this.latestConfig.getMaxEjectionTimeMs(); + const returnTime = new Date( + mapEntry.currentEjectionTimestamp.getTime() + ); + returnTime.setMilliseconds( + returnTime.getMilliseconds() + + Math.min( + baseEjectionTimeMs * mapEntry.ejectionTimeMultiplier, + Math.max(baseEjectionTimeMs, maxEjectionTimeMs) + ) + ); + if (returnTime < new Date()) { + trace('Unejecting ' + address); + this.uneject(mapEntry); + } + } + } + } + + updateAddressList( + endpointList: Endpoint[], + lbConfig: TypedLoadBalancingConfig, + attributes: { [key: string]: unknown } + ): void { + if (!(lbConfig instanceof OutlierDetectionLoadBalancingConfig)) { + return; + } + for (const endpoint of endpointList) { + if (!this.entryMap.has(endpoint)) { + trace('Adding map entry for ' + endpointToString(endpoint)); + this.entryMap.set(endpoint, { + counter: new CallCounter(), + currentEjectionTimestamp: null, + ejectionTimeMultiplier: 0, + subchannelWrappers: [], + }); + } + } + this.entryMap.deleteMissing(endpointList); + const childPolicy = lbConfig.getChildPolicy(); + this.childBalancer.updateAddressList(endpointList, childPolicy, attributes); + + if ( + lbConfig.getSuccessRateEjectionConfig() || + lbConfig.getFailurePercentageEjectionConfig() + ) { + if (this.timerStartTime) { + trace('Previous timer existed. Replacing timer'); + clearTimeout(this.ejectionTimer); + const remainingDelay = + lbConfig.getIntervalMs() - + (new Date().getTime() - this.timerStartTime.getTime()); + this.startTimer(remainingDelay); + } else { + trace('Starting new timer'); + this.timerStartTime = new Date(); + this.startTimer(lbConfig.getIntervalMs()); + this.switchAllBuckets(); + } + } else { + trace('Counting disabled. 
Cancelling timer.'); + this.timerStartTime = null; + clearTimeout(this.ejectionTimer); + for (const mapEntry of this.entryMap.values()) { + this.uneject(mapEntry); + mapEntry.ejectionTimeMultiplier = 0; + } + } + + this.latestConfig = lbConfig; + } + exitIdle(): void { + this.childBalancer.exitIdle(); + } + resetBackoff(): void { + this.childBalancer.resetBackoff(); + } + destroy(): void { + clearTimeout(this.ejectionTimer); + this.childBalancer.destroy(); + } + getTypeName(): string { + return TYPE_NAME; + } +} + +export function setup() { + if (OUTLIER_DETECTION_ENABLED) { + registerLoadBalancerType( + TYPE_NAME, + OutlierDetectionLoadBalancer, + OutlierDetectionLoadBalancingConfig + ); + } +} diff --git a/packages/grpc-js/src/load-balancer-pick-first.ts b/packages/grpc-js/src/load-balancer-pick-first.ts index 31dc17847..f6c43b33d 100644 --- a/packages/grpc-js/src/load-balancer-pick-first.ts +++ b/packages/grpc-js/src/load-balancer-pick-first.ts @@ -18,10 +18,12 @@ import { LoadBalancer, ChannelControlHelper, + TypedLoadBalancingConfig, + registerDefaultLoadBalancerType, registerLoadBalancerType, - LoadBalancingConfig + createChildChannelControlHelper, } from './load-balancer'; -import { ConnectivityState } from './channel'; +import { ConnectivityState } from './connectivity-state'; import { QueuePicker, Picker, @@ -30,14 +32,17 @@ import { PickResultType, UnavailablePicker, } from './picker'; -import { - Subchannel, - ConnectivityStateListener, - SubchannelAddress, - subchannelAddressToString, -} from './subchannel'; +import { Endpoint, SubchannelAddress } from './subchannel-address'; import * as logging from './logging'; import { LogVerbosity } from './constants'; +import { + SubchannelInterface, + ConnectivityStateListener, + HealthListener, +} from './subchannel-interface'; +import { isTcpSubchannelAddress } from './subchannel-address'; +import { isIPv6 } from 'net'; +import { ChannelOptions } from './channel-options'; const TRACER_NAME = 'pick_first'; @@ -53,21 +58,36 @@ const TYPE_NAME = 'pick_first'; */ const CONNECTION_DELAY_INTERVAL_MS = 250; -export class PickFirstLoadBalancingConfig implements LoadBalancingConfig { +export class PickFirstLoadBalancingConfig implements TypedLoadBalancingConfig { + constructor(private readonly shuffleAddressList: boolean) {} + getLoadBalancerName(): string { return TYPE_NAME; } - constructor() {} - toJsonObject(): object { return { - [TYPE_NAME]: {} + [TYPE_NAME]: { + shuffleAddressList: this.shuffleAddressList, + }, }; } + getShuffleAddressList() { + return this.shuffleAddressList; + } + + // eslint-disable-next-line @typescript-eslint/no-explicit-any static createFromJson(obj: any) { - return new PickFirstLoadBalancingConfig(); + if ( + 'shuffleAddressList' in obj && + !(typeof obj.shuffleAddressList === 'boolean') + ) { + throw new Error( + 'pick_first config field shuffleAddressList must be a boolean if provided' + ); + } + return new PickFirstLoadBalancingConfig(obj.shuffleAddressList === true); } } @@ -76,37 +96,82 @@ export class PickFirstLoadBalancingConfig implements LoadBalancingConfig { * picked subchannel. 
*/ class PickFirstPicker implements Picker { - constructor(private subchannel: Subchannel) {} + constructor(private subchannel: SubchannelInterface) {} pick(pickArgs: PickArgs): CompletePickResult { return { pickResultType: PickResultType.COMPLETE, subchannel: this.subchannel, status: null, - extraFilterFactory: null, onCallStarted: null, + onCallEnded: null, }; } } -interface ConnectivityStateCounts { - [ConnectivityState.CONNECTING]: number; - [ConnectivityState.IDLE]: number; - [ConnectivityState.READY]: number; - [ConnectivityState.SHUTDOWN]: number; - [ConnectivityState.TRANSIENT_FAILURE]: number; +interface SubchannelChild { + subchannel: SubchannelInterface; + hasReportedTransientFailure: boolean; +} + +/** + * Return a new array with the elements of the input array in a random order + * @param list The input array + * @returns A shuffled array of the elements of list + */ +export function shuffled<T>(list: T[]): T[] { + const result = list.slice(); + for (let i = result.length - 1; i > 1; i--) { + const j = Math.floor(Math.random() * (i + 1)); + const temp = result[i]; + result[i] = result[j]; + result[j] = temp; + } + return result; +} + +/** + * Interleave addresses in addressList by family in accordance with RFC-8305 section 4 + * @param addressList + * @returns + */ +function interleaveAddressFamilies( + addressList: SubchannelAddress[] +): SubchannelAddress[] { + const result: SubchannelAddress[] = []; + const ipv6Addresses: SubchannelAddress[] = []; + const ipv4Addresses: SubchannelAddress[] = []; + const ipv6First = + isTcpSubchannelAddress(addressList[0]) && isIPv6(addressList[0].host); + for (const address of addressList) { + if (isTcpSubchannelAddress(address) && isIPv6(address.host)) { + ipv6Addresses.push(address); + } else { + ipv4Addresses.push(address); + } + } + const firstList = ipv6First ? ipv6Addresses : ipv4Addresses; + const secondList = ipv6First ? ipv4Addresses : ipv6Addresses; + for (let i = 0; i < Math.max(firstList.length, secondList.length); i++) { + if (i < firstList.length) { + result.push(firstList[i]); + } + if (i < secondList.length) { + result.push(secondList[i]); + } + } + return result; } +const REPORT_HEALTH_STATUS_OPTION_NAME = + 'grpc-node.internal.pick-first.report_health_status'; + export class PickFirstLoadBalancer implements LoadBalancer { - /** - * The list of backend addresses most recently passed to `updateAddressList`. - */ - private latestAddressList: SubchannelAddress[] = []; /** * The list of subchannels this load balancer is currently attempting to * connect to. */ - private subchannels: Subchannel[] = []; + private children: SubchannelChild[] = []; /** * The current connectivity state of the load balancer. */ @@ -116,23 +181,33 @@ export class PickFirstLoadBalancer implements LoadBalancer { * recently started connection attempt. */ private currentSubchannelIndex = 0; - - private subchannelStateCounts: ConnectivityStateCounts; /** * The currently picked subchannel used for making calls. Populated if * and only if the load balancer's current state is READY. In that case, * the subchannel's current state is also READY. */ - private currentPick: Subchannel | null = null; + private currentPick: SubchannelInterface | null = null; /** * Listener callback attached to each subchannel in the `subchannels` list * while establishing a connection. */ - private subchannelStateListener: ConnectivityStateListener; - /** - * Listener callback attached to the current picked subchannel. 
- */ - private pickedSubchannelStateListener: ConnectivityStateListener; + private subchannelStateListener: ConnectivityStateListener = ( + subchannel, + previousState, + newState, + keepaliveTime, + errorMessage + ) => { + this.onSubchannelStateUpdate( + subchannel, + previousState, + newState, + errorMessage + ); + }; + + private pickedSubchannelHealthListener: HealthListener = () => + this.calculateAndReportNewState(); /** * Timer reference for the timer tracking when to start */ @@ -140,6 +215,30 @@ export class PickFirstLoadBalancer implements LoadBalancer { private triedAllSubchannels = false; + /** + * The LB policy enters sticky TRANSIENT_FAILURE mode when all + * subchannels have failed to connect at least once, and it stays in that + * mode until a connection attempt is successful. While in sticky TF mode, + * the LB policy continuously attempts to connect to all of its subchannels. + */ + private stickyTransientFailureMode = false; + + private reportHealthStatus: boolean; + + /** + * Indicates whether we called channelControlHelper.requestReresolution since + * the last call to updateAddressList + */ + private requestedResolutionSinceLastUpdate = false; + + /** + * The most recent error reported by any subchannel as it transitioned to + * TRANSIENT_FAILURE. + */ + private lastError: string | null = null; + + private latestAddressList: SubchannelAddress[] | null = null; + /** * Load balancer that attempts to connect to each backend in the address list * in order, and picks the first one that connects, using it for every @@ -147,131 +246,141 @@ export class PickFirstLoadBalancer implements LoadBalancer { * @param channelControlHelper `ChannelControlHelper` instance provided by * this load balancer's owner. */ - constructor(private readonly channelControlHelper: ChannelControlHelper) { - this.subchannelStateCounts = { - [ConnectivityState.CONNECTING]: 0, - [ConnectivityState.IDLE]: 0, - [ConnectivityState.READY]: 0, - [ConnectivityState.SHUTDOWN]: 0, - [ConnectivityState.TRANSIENT_FAILURE]: 0, - }; - this.subchannelStateListener = ( - subchannel: Subchannel, - previousState: ConnectivityState, - newState: ConnectivityState - ) => { - this.subchannelStateCounts[previousState] -= 1; - this.subchannelStateCounts[newState] += 1; - /* If the subchannel we most recently attempted to start connecting - * to goes into TRANSIENT_FAILURE, immediately try to start - * connecting to the next one instead of waiting for the connection - * delay timer. 
*/ - if ( - subchannel === this.subchannels[this.currentSubchannelIndex] && - newState === ConnectivityState.TRANSIENT_FAILURE - ) { - this.startNextSubchannelConnecting(); + constructor( + private readonly channelControlHelper: ChannelControlHelper, + options: ChannelOptions + ) { + this.connectionDelayTimeout = setTimeout(() => {}, 0); + clearTimeout(this.connectionDelayTimeout); + this.reportHealthStatus = options[REPORT_HEALTH_STATUS_OPTION_NAME]; + } + + private allChildrenHaveReportedTF(): boolean { + return this.children.every(child => child.hasReportedTransientFailure); + } + + private calculateAndReportNewState() { + if (this.currentPick) { + if (this.reportHealthStatus && !this.currentPick.isHealthy()) { + this.updateState( + ConnectivityState.TRANSIENT_FAILURE, + new UnavailablePicker({ + details: `Picked subchannel ${this.currentPick.getAddress()} is unhealthy`, + }) + ); + } else { + this.updateState( + ConnectivityState.READY, + new PickFirstPicker(this.currentPick) + ); } - if (newState === ConnectivityState.READY) { - this.pickSubchannel(subchannel); - return; + } else if (this.children.length === 0) { + this.updateState(ConnectivityState.IDLE, new QueuePicker(this)); + } else { + if (this.stickyTransientFailureMode) { + this.updateState( + ConnectivityState.TRANSIENT_FAILURE, + new UnavailablePicker({ + details: `No connection established. Last error: ${this.lastError}`, + }) + ); } else { - if ( - this.triedAllSubchannels && - this.subchannelStateCounts[ConnectivityState.IDLE] === - this.subchannels.length - ) { - /* If all of the subchannels are IDLE we should go back to a - * basic IDLE state where there is no subchannel list to avoid - * holding unused resources */ - this.resetSubchannelList(); - this.updateState(ConnectivityState.IDLE, new QueuePicker(this)); - return; - } - if (this.currentPick === null) { - if (this.triedAllSubchannels) { - let newLBState: ConnectivityState; - if (this.subchannelStateCounts[ConnectivityState.CONNECTING] > 0) { - newLBState = ConnectivityState.CONNECTING; - } else if ( - this.subchannelStateCounts[ConnectivityState.TRANSIENT_FAILURE] > - 0 - ) { - newLBState = ConnectivityState.TRANSIENT_FAILURE; - } else { - newLBState = ConnectivityState.IDLE; - } - if (newLBState !== this.currentState) { - if (newLBState === ConnectivityState.TRANSIENT_FAILURE) { - this.updateState(newLBState, new UnavailablePicker()); - } else { - this.updateState(newLBState, new QueuePicker(this)); - } - } - } else { - this.updateState( - ConnectivityState.CONNECTING, - new QueuePicker(this) - ); - } - } + this.updateState(ConnectivityState.CONNECTING, new QueuePicker(this)); } - }; - this.pickedSubchannelStateListener = ( - subchannel: Subchannel, - previousState: ConnectivityState, - newState: ConnectivityState - ) => { - if (newState !== ConnectivityState.READY) { - this.currentPick = null; - subchannel.unref(); - subchannel.removeConnectivityStateListener( - this.pickedSubchannelStateListener + } + } + + private requestReresolution() { + this.requestedResolutionSinceLastUpdate = true; + this.channelControlHelper.requestReresolution(); + } + + private maybeEnterStickyTransientFailureMode() { + if (!this.allChildrenHaveReportedTF()) { + return; + } + if (!this.requestedResolutionSinceLastUpdate) { + /* Each time we get an update we reset each subchannel's + * hasReportedTransientFailure flag, so the next time we get to this + * point after that, each subchannel has reported TRANSIENT_FAILURE + * at least once since then. 
That is the trigger for requesting + * reresolution, whether or not the LB policy is already in sticky TF + * mode. */ + this.requestReresolution(); + } + if (this.stickyTransientFailureMode) { + return; + } + this.stickyTransientFailureMode = true; + for (const { subchannel } of this.children) { + subchannel.startConnecting(); + } + this.calculateAndReportNewState(); + } + + private removeCurrentPick() { + if (this.currentPick !== null) { + /* Unref can cause a state change, which can cause a change in the value + * of this.currentPick, so we hold a local reference to make sure that + * does not impact this function. */ + const currentPick = this.currentPick; + this.currentPick = null; + currentPick.unref(); + currentPick.removeConnectivityStateListener(this.subchannelStateListener); + this.channelControlHelper.removeChannelzChild( + currentPick.getChannelzRef() + ); + if (this.reportHealthStatus) { + currentPick.removeHealthStateWatcher( + this.pickedSubchannelHealthListener ); - if (this.subchannels.length > 0) { - if (this.triedAllSubchannels) { - let newLBState: ConnectivityState; - if (this.subchannelStateCounts[ConnectivityState.CONNECTING] > 0) { - newLBState = ConnectivityState.CONNECTING; - } else if ( - this.subchannelStateCounts[ConnectivityState.TRANSIENT_FAILURE] > - 0 - ) { - newLBState = ConnectivityState.TRANSIENT_FAILURE; - } else { - newLBState = ConnectivityState.IDLE; - } - if (newLBState === ConnectivityState.TRANSIENT_FAILURE) { - this.updateState(newLBState, new UnavailablePicker()); - } else { - this.updateState(newLBState, new QueuePicker(this)); - } - } else { - this.updateState( - ConnectivityState.CONNECTING, - new QueuePicker(this) - ); + } + } + } + + private onSubchannelStateUpdate( + subchannel: SubchannelInterface, + previousState: ConnectivityState, + newState: ConnectivityState, + errorMessage?: string + ) { + if (this.currentPick?.realSubchannelEquals(subchannel)) { + if (newState !== ConnectivityState.READY) { + this.removeCurrentPick(); + this.calculateAndReportNewState(); + this.requestReresolution(); + } + return; + } + for (const [index, child] of this.children.entries()) { + if (subchannel.realSubchannelEquals(child.subchannel)) { + if (newState === ConnectivityState.READY) { + this.pickSubchannel(child.subchannel); + } + if (newState === ConnectivityState.TRANSIENT_FAILURE) { + child.hasReportedTransientFailure = true; + if (errorMessage) { + this.lastError = errorMessage; + } + this.maybeEnterStickyTransientFailureMode(); + if (index === this.currentSubchannelIndex) { + this.startNextSubchannelConnecting(index + 1); } - } else { - /* We don't need to backoff here because this only happens if a - * subchannel successfully connects then disconnects, so it will not - * create a loop of attempting to connect to an unreachable backend - */ - this.updateState(ConnectivityState.IDLE, new QueuePicker(this)); } + child.subchannel.startConnecting(); + return; } - }; - this.connectionDelayTimeout = setTimeout(() => {}, 0); - clearTimeout(this.connectionDelayTimeout); + } } - private startNextSubchannelConnecting() { + private startNextSubchannelConnecting(startIndex: number) { + clearTimeout(this.connectionDelayTimeout); if (this.triedAllSubchannels) { return; } - for (const [index, subchannel] of this.subchannels.entries()) { - if (index > this.currentSubchannelIndex) { - const subchannelState = subchannel.getConnectivityState(); + for (const [index, child] of this.children.entries()) { + if (index >= startIndex) { + const subchannelState = 
child.subchannel.getConnectivityState(); if ( subchannelState === ConnectivityState.IDLE || subchannelState === ConnectivityState.CONNECTING @@ -282,6 +391,7 @@ export class PickFirstLoadBalancer implements LoadBalancer { } } this.triedAllSubchannels = true; + this.maybeEnterStickyTransientFailureMode(); } /** @@ -292,36 +402,39 @@ export class PickFirstLoadBalancer implements LoadBalancer { clearTimeout(this.connectionDelayTimeout); this.currentSubchannelIndex = subchannelIndex; if ( - this.subchannels[subchannelIndex].getConnectivityState() === + this.children[subchannelIndex].subchannel.getConnectivityState() === ConnectivityState.IDLE ) { trace( 'Start connecting to subchannel with address ' + - this.subchannels[subchannelIndex].getAddress() + this.children[subchannelIndex].subchannel.getAddress() ); process.nextTick(() => { - this.subchannels[subchannelIndex].startConnecting(); + this.children[subchannelIndex]?.subchannel.startConnecting(); }); } this.connectionDelayTimeout = setTimeout(() => { - this.startNextSubchannelConnecting(); + this.startNextSubchannelConnecting(subchannelIndex + 1); }, CONNECTION_DELAY_INTERVAL_MS); + this.connectionDelayTimeout.unref?.(); } - private pickSubchannel(subchannel: Subchannel) { - trace('Pick subchannel with address ' + subchannel.getAddress()); - if (this.currentPick !== null) { - this.currentPick.unref(); - this.currentPick.removeConnectivityStateListener( - this.pickedSubchannelStateListener - ); + private pickSubchannel(subchannel: SubchannelInterface) { + if (this.currentPick && subchannel.realSubchannelEquals(this.currentPick)) { + return; } + trace('Pick subchannel with address ' + subchannel.getAddress()); + this.stickyTransientFailureMode = false; + this.removeCurrentPick(); this.currentPick = subchannel; - this.updateState(ConnectivityState.READY, new PickFirstPicker(subchannel)); - subchannel.addConnectivityStateListener(this.pickedSubchannelStateListener); subchannel.ref(); + if (this.reportHealthStatus) { + subchannel.addHealthStateWatcher(this.pickedSubchannelHealthListener); + } + this.channelControlHelper.addChannelzChild(subchannel.getChannelzRef()); this.resetSubchannelList(); clearTimeout(this.connectionDelayTimeout); + this.calculateAndReportNewState(); } private updateState(newState: ConnectivityState, picker: Picker) { @@ -335,104 +448,99 @@ export class PickFirstLoadBalancer implements LoadBalancer { } private resetSubchannelList() { - for (const subchannel of this.subchannels) { - subchannel.removeConnectivityStateListener(this.subchannelStateListener); - subchannel.unref(); + for (const child of this.children) { + if ( + !( + this.currentPick && + child.subchannel.realSubchannelEquals(this.currentPick) + ) + ) { + /* The connectivity state listener is the same whether the subchannel + * is in the list of children or it is the currentPick, so if it is in + * both, removing it here would cause problems. In particular, that + * always happens immediately after the subchannel is picked. */ + child.subchannel.removeConnectivityStateListener( + this.subchannelStateListener + ); + } + /* Refs are counted independently for the children list and the + * currentPick, so we call unref whether or not the child is the + * currentPick. Channelz child references are also refcounted, so + * removeChannelzChild can be handled the same way. 
*/ + child.subchannel.unref(); + this.channelControlHelper.removeChannelzChild( + child.subchannel.getChannelzRef() + ); } this.currentSubchannelIndex = 0; - this.subchannelStateCounts = { - [ConnectivityState.CONNECTING]: 0, - [ConnectivityState.IDLE]: 0, - [ConnectivityState.READY]: 0, - [ConnectivityState.SHUTDOWN]: 0, - [ConnectivityState.TRANSIENT_FAILURE]: 0, - }; - this.subchannels = []; + this.children = []; this.triedAllSubchannels = false; + this.requestedResolutionSinceLastUpdate = false; } - /** - * Start connecting to the address list most recently passed to - * `updateAddressList`. - */ - private connectToAddressList(): void { - this.resetSubchannelList(); - trace( - 'Connect to address list ' + - this.latestAddressList.map((address) => - subchannelAddressToString(address) - ) - ); - this.subchannels = this.latestAddressList.map((address) => - this.channelControlHelper.createSubchannel(address, {}) - ); - for (const subchannel of this.subchannels) { + private connectToAddressList(addressList: SubchannelAddress[]) { + const newChildrenList = addressList.map(address => ({ + subchannel: this.channelControlHelper.createSubchannel(address, {}), + hasReportedTransientFailure: false, + })); + /* Ref each subchannel before resetting the list, to ensure that + * subchannels shared between the list don't drop to 0 refs during the + * transition. */ + for (const { subchannel } of newChildrenList) { subchannel.ref(); + this.channelControlHelper.addChannelzChild(subchannel.getChannelzRef()); } - for (const subchannel of this.subchannels) { + this.resetSubchannelList(); + this.children = newChildrenList; + for (const { subchannel } of this.children) { subchannel.addConnectivityStateListener(this.subchannelStateListener); - this.subchannelStateCounts[subchannel.getConnectivityState()] += 1; if (subchannel.getConnectivityState() === ConnectivityState.READY) { this.pickSubchannel(subchannel); - this.resetSubchannelList(); return; } } - for (const [index, subchannel] of this.subchannels.entries()) { - const subchannelState = subchannel.getConnectivityState(); + for (const child of this.children) { if ( - subchannelState === ConnectivityState.IDLE || - subchannelState === ConnectivityState.CONNECTING + child.subchannel.getConnectivityState() === + ConnectivityState.TRANSIENT_FAILURE ) { - this.startConnecting(index); - if (this.currentPick === null) { - this.updateState(ConnectivityState.CONNECTING, new QueuePicker(this)); - } - return; + child.hasReportedTransientFailure = true; } } - // If the code reaches this point, every subchannel must be in TRANSIENT_FAILURE - if (this.currentPick === null) { - this.updateState( - ConnectivityState.TRANSIENT_FAILURE, - new UnavailablePicker() - ); - } + this.startNextSubchannelConnecting(0); + this.calculateAndReportNewState(); } updateAddressList( - addressList: SubchannelAddress[], - lbConfig: LoadBalancingConfig + endpointList: Endpoint[], + lbConfig: TypedLoadBalancingConfig ): void { - // lbConfig has no useful information for pick first load balancing - /* To avoid unnecessary churn, we only do something with this address list - * if we're not currently trying to establish a connection, or if the new - * address list is different from the existing one */ - if ( - this.subchannels.length === 0 || - !this.latestAddressList.every( - (value, index) => addressList[index] === value - ) - ) { - this.latestAddressList = addressList; - this.connectToAddressList(); + if (!(lbConfig instanceof PickFirstLoadBalancingConfig)) { + return; + } + /* Previously, an 
update would be discarded if it was identical to the + * previous update, to minimize churn. Now the DNS resolver is + * rate-limited, so that is less of a concern. */ + if (lbConfig.getShuffleAddressList()) { + endpointList = shuffled(endpointList); + } + const rawAddressList = ([] as SubchannelAddress[]).concat( + ...endpointList.map(endpoint => endpoint.addresses) + ); + if (rawAddressList.length === 0) { + throw new Error('No addresses in endpoint list passed to pick_first'); } + const addressList = interleaveAddressFamilies(rawAddressList); + this.latestAddressList = addressList; + this.connectToAddressList(addressList); } exitIdle() { - for (const subchannel of this.subchannels) { - subchannel.startConnecting(); - } - if (this.currentState === ConnectivityState.IDLE) { - if (this.latestAddressList.length > 0) { - this.connectToAddressList(); - } - } if ( - this.currentState === ConnectivityState.IDLE || - this.triedAllSubchannels + this.currentState === ConnectivityState.IDLE && + this.latestAddressList ) { - this.channelControlHelper.requestReresolution(); + this.connectToAddressList(this.latestAddressList); } } @@ -443,12 +551,7 @@ export class PickFirstLoadBalancer implements LoadBalancer { destroy() { this.resetSubchannelList(); - if (this.currentPick !== null) { - this.currentPick.unref(); - this.currentPick.removeConnectivityStateListener( - this.pickedSubchannelStateListener - ); - } + this.removeCurrentPick(); } getTypeName(): string { @@ -456,6 +559,82 @@ export class PickFirstLoadBalancer implements LoadBalancer { } } +const LEAF_CONFIG = new PickFirstLoadBalancingConfig(false); + +/** + * This class handles the leaf load balancing operations for a single endpoint. + * It is a thin wrapper around a PickFirstLoadBalancer with a different API + * that more closely reflects how it will be used as a leaf balancer. + */ +export class LeafLoadBalancer { + private pickFirstBalancer: PickFirstLoadBalancer; + private latestState: ConnectivityState = ConnectivityState.IDLE; + private latestPicker: Picker; + constructor( + private endpoint: Endpoint, + channelControlHelper: ChannelControlHelper, + options: ChannelOptions + ) { + const childChannelControlHelper = createChildChannelControlHelper( + channelControlHelper, + { + updateState: (connectivityState, picker) => { + this.latestState = connectivityState; + this.latestPicker = picker; + channelControlHelper.updateState(connectivityState, picker); + }, + } + ); + this.pickFirstBalancer = new PickFirstLoadBalancer( + childChannelControlHelper, + { ...options, [REPORT_HEALTH_STATUS_OPTION_NAME]: true } + ); + this.latestPicker = new QueuePicker(this.pickFirstBalancer); + } + + startConnecting() { + this.pickFirstBalancer.updateAddressList([this.endpoint], LEAF_CONFIG); + } + + /** + * Update the endpoint associated with this LeafLoadBalancer to a new + * endpoint. Does not trigger connection establishment if a connection + * attempt is not already in progress. 
+ * @param newEndpoint + */ + updateEndpoint(newEndpoint: Endpoint) { + this.endpoint = newEndpoint; + if (this.latestState !== ConnectivityState.IDLE) { + this.startConnecting(); + } + } + + getConnectivityState() { + return this.latestState; + } + + getPicker() { + return this.latestPicker; + } + + getEndpoint() { + return this.endpoint; + } + + exitIdle() { + this.pickFirstBalancer.exitIdle(); + } + + destroy() { + this.pickFirstBalancer.destroy(); + } +} + export function setup(): void { - registerLoadBalancerType(TYPE_NAME, PickFirstLoadBalancer, PickFirstLoadBalancingConfig); + registerLoadBalancerType( + TYPE_NAME, + PickFirstLoadBalancer, + PickFirstLoadBalancingConfig + ); + registerDefaultLoadBalancerType(TYPE_NAME); } diff --git a/packages/grpc-js/src/load-balancer-round-robin.ts b/packages/grpc-js/src/load-balancer-round-robin.ts index daba45941..7e70c554f 100644 --- a/packages/grpc-js/src/load-balancer-round-robin.ts +++ b/packages/grpc-js/src/load-balancer-round-robin.ts @@ -18,26 +18,27 @@ import { LoadBalancer, ChannelControlHelper, + TypedLoadBalancingConfig, registerLoadBalancerType, - LoadBalancingConfig + createChildChannelControlHelper, } from './load-balancer'; -import { ConnectivityState } from './channel'; +import { ConnectivityState } from './connectivity-state'; import { QueuePicker, Picker, PickArgs, - CompletePickResult, - PickResultType, UnavailablePicker, + PickResult, } from './picker'; -import { - Subchannel, - ConnectivityStateListener, - SubchannelAddress, - subchannelAddressToString, -} from './subchannel'; import * as logging from './logging'; import { LogVerbosity } from './constants'; +import { + Endpoint, + endpointEqual, + endpointToString, +} from './subchannel-address'; +import { LeafLoadBalancer } from './load-balancer-pick-first'; +import { ChannelOptions } from './channel-options'; const TRACER_NAME = 'round_robin'; @@ -47,7 +48,7 @@ function trace(text: string): void { const TYPE_NAME = 'round_robin'; -class RoundRobinLoadBalancingConfig implements LoadBalancingConfig { +class RoundRobinLoadBalancingConfig implements TypedLoadBalancingConfig { getLoadBalancerName(): string { return TYPE_NAME; } @@ -56,10 +57,11 @@ class RoundRobinLoadBalancingConfig implements LoadBalancingConfig { toJsonObject(): object { return { - [TYPE_NAME]: {} + [TYPE_NAME]: {}, }; } + // eslint-disable-next-line @typescript-eslint/no-explicit-any static createFromJson(obj: any) { return new RoundRobinLoadBalancingConfig(); } @@ -67,20 +69,14 @@ class RoundRobinLoadBalancingConfig implements LoadBalancingConfig { class RoundRobinPicker implements Picker { constructor( - private readonly subchannelList: Subchannel[], + private readonly children: { endpoint: Endpoint; picker: Picker }[], private nextIndex = 0 ) {} - pick(pickArgs: PickArgs): CompletePickResult { - const pickedSubchannel = this.subchannelList[this.nextIndex]; - this.nextIndex = (this.nextIndex + 1) % this.subchannelList.length; - return { - pickResultType: PickResultType.COMPLETE, - subchannel: pickedSubchannel, - status: null, - extraFilterFactory: null, - onCallStarted: null, - }; + pick(pickArgs: PickArgs): PickResult { + const childPicker = this.children[this.nextIndex].picker; + this.nextIndex = (this.nextIndex + 1) % this.children.length; + return childPicker.pick(pickArgs); } /** @@ -88,67 +84,56 @@ class RoundRobinPicker implements Picker { * balancer implementation to preserve this part of the picker state if * possible when a subchannel connects or disconnects. 
*/ - peekNextSubchannel(): Subchannel { - return this.subchannelList[this.nextIndex]; + peekNextEndpoint(): Endpoint { + return this.children[this.nextIndex].endpoint; } } -interface ConnectivityStateCounts { - [ConnectivityState.CONNECTING]: number; - [ConnectivityState.IDLE]: number; - [ConnectivityState.READY]: number; - [ConnectivityState.SHUTDOWN]: number; - [ConnectivityState.TRANSIENT_FAILURE]: number; -} - export class RoundRobinLoadBalancer implements LoadBalancer { - private subchannels: Subchannel[] = []; + private children: LeafLoadBalancer[] = []; private currentState: ConnectivityState = ConnectivityState.IDLE; - private subchannelStateListener: ConnectivityStateListener; + private currentReadyPicker: RoundRobinPicker | null = null; - private subchannelStateCounts: ConnectivityStateCounts; + private updatesPaused = false; - private currentReadyPicker: RoundRobinPicker | null = null; + private childChannelControlHelper: ChannelControlHelper; - constructor(private readonly channelControlHelper: ChannelControlHelper) { - this.subchannelStateCounts = { - [ConnectivityState.CONNECTING]: 0, - [ConnectivityState.IDLE]: 0, - [ConnectivityState.READY]: 0, - [ConnectivityState.SHUTDOWN]: 0, - [ConnectivityState.TRANSIENT_FAILURE]: 0, - }; - this.subchannelStateListener = ( - subchannel: Subchannel, - previousState: ConnectivityState, - newState: ConnectivityState - ) => { - this.subchannelStateCounts[previousState] -= 1; - this.subchannelStateCounts[newState] += 1; - this.calculateAndUpdateState(); - - if ( - newState === ConnectivityState.TRANSIENT_FAILURE || - newState === ConnectivityState.IDLE - ) { - this.channelControlHelper.requestReresolution(); - subchannel.startConnecting(); + private lastError: string | null = null; + + constructor( + private readonly channelControlHelper: ChannelControlHelper, + private readonly options: ChannelOptions + ) { + this.childChannelControlHelper = createChildChannelControlHelper( + channelControlHelper, + { + updateState: (connectivityState, picker) => { + this.calculateAndUpdateState(); + }, } - }; + ); + } + + private countChildrenWithState(state: ConnectivityState) { + return this.children.filter(child => child.getConnectivityState() === state) + .length; } private calculateAndUpdateState() { - if (this.subchannelStateCounts[ConnectivityState.READY] > 0) { - const readySubchannels = this.subchannels.filter( - (subchannel) => - subchannel.getConnectivityState() === ConnectivityState.READY + if (this.updatesPaused) { + return; + } + if (this.countChildrenWithState(ConnectivityState.READY) > 0) { + const readyChildren = this.children.filter( + child => child.getConnectivityState() === ConnectivityState.READY ); let index = 0; if (this.currentReadyPicker !== null) { - index = readySubchannels.indexOf( - this.currentReadyPicker.peekNextSubchannel() + const nextPickedEndpoint = this.currentReadyPicker.peekNextEndpoint(); + index = readyChildren.findIndex(child => + endpointEqual(child.getEndpoint(), nextPickedEndpoint) ); if (index < 0) { index = 0; @@ -156,20 +141,37 @@ export class RoundRobinLoadBalancer implements LoadBalancer { } this.updateState( ConnectivityState.READY, - new RoundRobinPicker(readySubchannels, index) + new RoundRobinPicker( + readyChildren.map(child => ({ + endpoint: child.getEndpoint(), + picker: child.getPicker(), + })), + index + ) ); - } else if (this.subchannelStateCounts[ConnectivityState.CONNECTING] > 0) { + } else if (this.countChildrenWithState(ConnectivityState.CONNECTING) > 0) { 
this.updateState(ConnectivityState.CONNECTING, new QueuePicker(this)); } else if ( - this.subchannelStateCounts[ConnectivityState.TRANSIENT_FAILURE] > 0 + this.countChildrenWithState(ConnectivityState.TRANSIENT_FAILURE) > 0 ) { this.updateState( ConnectivityState.TRANSIENT_FAILURE, - new UnavailablePicker() + new UnavailablePicker({ + details: `No connection established. Last error: ${this.lastError}`, + }) ); } else { this.updateState(ConnectivityState.IDLE, new QueuePicker(this)); } + /* round_robin should keep all children connected, this is how we do that. + * We can't do this more efficiently in the individual child's updateState + * callback because that doesn't have a reference to which child the state + * change is associated with. */ + for (const child of this.children) { + if (child.getConnectivityState() === ConnectivityState.IDLE) { + child.exitIdle(); + } + } } private updateState(newState: ConnectivityState, picker: Picker) { @@ -188,55 +190,40 @@ export class RoundRobinLoadBalancer implements LoadBalancer { } private resetSubchannelList() { - for (const subchannel of this.subchannels) { - subchannel.removeConnectivityStateListener(this.subchannelStateListener); - subchannel.unref(); + for (const child of this.children) { + child.destroy(); } - this.subchannelStateCounts = { - [ConnectivityState.CONNECTING]: 0, - [ConnectivityState.IDLE]: 0, - [ConnectivityState.READY]: 0, - [ConnectivityState.SHUTDOWN]: 0, - [ConnectivityState.TRANSIENT_FAILURE]: 0, - }; - this.subchannels = []; } updateAddressList( - addressList: SubchannelAddress[], - lbConfig: LoadBalancingConfig + endpointList: Endpoint[], + lbConfig: TypedLoadBalancingConfig ): void { this.resetSubchannelList(); - trace( - 'Connect to address list ' + - addressList.map((address) => subchannelAddressToString(address)) - ); - this.subchannels = addressList.map((address) => - this.channelControlHelper.createSubchannel(address, {}) + trace('Connect to endpoint list ' + endpointList.map(endpointToString)); + this.updatesPaused = true; + this.children = endpointList.map( + endpoint => + new LeafLoadBalancer( + endpoint, + this.childChannelControlHelper, + this.options + ) ); - for (const subchannel of this.subchannels) { - subchannel.ref(); - subchannel.addConnectivityStateListener(this.subchannelStateListener); - const subchannelState = subchannel.getConnectivityState(); - this.subchannelStateCounts[subchannelState] += 1; - if ( - subchannelState === ConnectivityState.IDLE || - subchannelState === ConnectivityState.TRANSIENT_FAILURE - ) { - subchannel.startConnecting(); - } + for (const child of this.children) { + child.startConnecting(); } + this.updatesPaused = false; this.calculateAndUpdateState(); } exitIdle(): void { - for (const subchannel of this.subchannels) { - subchannel.startConnecting(); - } + /* The round_robin LB policy is only in the IDLE state if it has no + * addresses to try to connect to and it has no picked subchannel. + * In that case, there is no meaningful action that can be taken here. 
*/ } + resetBackoff(): void { - /* The pick first load balancer does not have a connection backoff, so this - * does nothing */ + // This LB policy has no backoff to reset } destroy(): void { this.resetSubchannelList(); @@ -247,5 +234,9 @@ } export function setup() { - registerLoadBalancerType(TYPE_NAME, RoundRobinLoadBalancer, RoundRobinLoadBalancingConfig); + registerLoadBalancerType( + TYPE_NAME, + RoundRobinLoadBalancer, + RoundRobinLoadBalancingConfig + ); } diff --git a/packages/grpc-js/src/load-balancer.ts b/packages/grpc-js/src/load-balancer.ts index 8d5c7c837..fb353a59a 100644 --- a/packages/grpc-js/src/load-balancer.ts +++ b/packages/grpc-js/src/load-balancer.ts @@ -16,11 +16,14 @@ */ import { ChannelOptions } from './channel-options'; -import { Subchannel, SubchannelAddress } from './subchannel'; -import { ConnectivityState } from './channel'; +import { Endpoint, SubchannelAddress } from './subchannel-address'; +import { ConnectivityState } from './connectivity-state'; import { Picker } from './picker'; -import * as load_balancer_pick_first from './load-balancer-pick-first'; -import * as load_balancer_round_robin from './load-balancer-round-robin'; +import type { ChannelRef, SubchannelRef } from './channelz'; +import { SubchannelInterface } from './subchannel-interface'; +import { LoadBalancingConfig } from './service-config'; +import { log } from './logging'; +import { LogVerbosity } from './constants'; /** * A collection of functions associated with a channel that a load balancer @@ -35,7 +38,7 @@ export interface ChannelControlHelper { createSubchannel( subchannelAddress: SubchannelAddress, subchannelArgs: ChannelOptions - ): Subchannel; + ): SubchannelInterface; /** * Passes a new subchannel picker up to the channel. This is called if either * the connectivity state changes or if a different picker is needed for any @@ -48,6 +51,38 @@ export interface ChannelControlHelper { * Request new data from the resolver. */ requestReresolution(): void; + addChannelzChild(child: ChannelRef | SubchannelRef): void; + removeChannelzChild(child: ChannelRef | SubchannelRef): void; +} + +/** + * Create a child ChannelControlHelper that overrides some methods of the + * parent while letting others pass through to the parent unmodified. This + * allows other code to create these children without needing to know about + * all of the methods to be passed through. + * @param parent + * @param overrides + */ +export function createChildChannelControlHelper( + parent: ChannelControlHelper, + overrides: Partial<ChannelControlHelper> +): ChannelControlHelper { + return { + createSubchannel: + overrides.createSubchannel?.bind(overrides) ?? + parent.createSubchannel.bind(parent), + updateState: + overrides.updateState?.bind(overrides) ?? parent.updateState.bind(parent), + requestReresolution: + overrides.requestReresolution?.bind(overrides) ?? + parent.requestReresolution.bind(parent), + addChannelzChild: + overrides.addChannelzChild?.bind(overrides) ?? + parent.addChannelzChild.bind(parent), + removeChannelzChild: + overrides.removeChannelzChild?.bind(overrides) ?? 
+ parent.removeChannelzChild.bind(parent), + }; } /** @@ -60,13 +95,13 @@ export interface LoadBalancer { * The load balancer will start establishing connections with the new list, * but will continue using any existing connections until the new connections * are established - * @param addressList The new list of addresses to connect to + * @param endpointList The new list of addresses to connect to * @param lbConfig The load balancing config object from the service config, * if one was provided */ updateAddressList( - addressList: SubchannelAddress[], - lbConfig: LoadBalancingConfig, + endpointList: Endpoint[], + lbConfig: TypedLoadBalancingConfig, attributes: { [key: string]: unknown } ): void; /** @@ -93,44 +128,59 @@ export interface LoadBalancer { } export interface LoadBalancerConstructor { - new (channelControlHelper: ChannelControlHelper): LoadBalancer; + new ( + channelControlHelper: ChannelControlHelper, + options: ChannelOptions + ): LoadBalancer; } -export interface LoadBalancingConfig { +export interface TypedLoadBalancingConfig { getLoadBalancerName(): string; toJsonObject(): object; } -export interface LoadBalancingConfigConstructor { - new(...args: any): LoadBalancingConfig; - createFromJson(obj: any): LoadBalancingConfig; +export interface TypedLoadBalancingConfigConstructor { + // eslint-disable-next-line @typescript-eslint/no-explicit-any + new (...args: any): TypedLoadBalancingConfig; + // eslint-disable-next-line @typescript-eslint/no-explicit-any + createFromJson(obj: any): TypedLoadBalancingConfig; } const registeredLoadBalancerTypes: { [name: string]: { - LoadBalancer: LoadBalancerConstructor, - LoadBalancingConfig: LoadBalancingConfigConstructor + LoadBalancer: LoadBalancerConstructor; + LoadBalancingConfig: TypedLoadBalancingConfigConstructor; }; } = {}; +let defaultLoadBalancerType: string | null = null; + export function registerLoadBalancerType( typeName: string, loadBalancerType: LoadBalancerConstructor, - loadBalancingConfigType: LoadBalancingConfigConstructor + loadBalancingConfigType: TypedLoadBalancingConfigConstructor ) { registeredLoadBalancerTypes[typeName] = { LoadBalancer: loadBalancerType, - LoadBalancingConfig: loadBalancingConfigType + LoadBalancingConfig: loadBalancingConfigType, }; } +export function registerDefaultLoadBalancerType(typeName: string) { + defaultLoadBalancerType = typeName; +} + export function createLoadBalancer( - config: LoadBalancingConfig, - channelControlHelper: ChannelControlHelper + config: TypedLoadBalancingConfig, + channelControlHelper: ChannelControlHelper, + options: ChannelOptions ): LoadBalancer | null { const typeName = config.getLoadBalancerName(); if (typeName in registeredLoadBalancerTypes) { - return new registeredLoadBalancerTypes[typeName].LoadBalancer(channelControlHelper); + return new registeredLoadBalancerTypes[typeName].LoadBalancer( + channelControlHelper, + options + ); } else { return null; } @@ -140,40 +190,63 @@ export function isLoadBalancerNameRegistered(typeName: string): boolean { return typeName in registeredLoadBalancerTypes; } -export function getFirstUsableConfig(configs: LoadBalancingConfig[], defaultPickFirst?: true): LoadBalancingConfig; -export function getFirstUsableConfig( - configs: LoadBalancingConfig[], - defaultPickFirst: boolean = false -): LoadBalancingConfig | null { - for (const config of configs) { - if (config.getLoadBalancerName() in registeredLoadBalancerTypes) { - return config; - } - } - if (defaultPickFirst) { - return new 
load_balancer_pick_first.PickFirstLoadBalancingConfig() - } else { - return null; - } -} - -export function validateLoadBalancingConfig(obj: any): LoadBalancingConfig { - if (!(obj !== null && (typeof obj === 'object'))) { - throw new Error('Load balancing config must be an object'); - } - const keys = Object.keys(obj); +export function parseLoadBalancingConfig( + rawConfig: LoadBalancingConfig +): TypedLoadBalancingConfig { + const keys = Object.keys(rawConfig); if (keys.length !== 1) { - throw new Error('Provided load balancing config has multiple conflicting entries'); + throw new Error( + 'Provided load balancing config has multiple conflicting entries' + ); } const typeName = keys[0]; if (typeName in registeredLoadBalancerTypes) { - return registeredLoadBalancerTypes[typeName].LoadBalancingConfig.createFromJson(obj[typeName]); + try { + return registeredLoadBalancerTypes[ + typeName + ].LoadBalancingConfig.createFromJson(rawConfig[typeName]); + } catch (e) { + throw new Error(`${typeName}: ${(e as Error).message}`); + } } else { throw new Error(`Unrecognized load balancing config name ${typeName}`); } } -export function registerAll() { - load_balancer_pick_first.setup(); - load_balancer_round_robin.setup(); +export function getDefaultConfig() { + if (!defaultLoadBalancerType) { + throw new Error('No default load balancer type registered'); + } + return new registeredLoadBalancerTypes[ + defaultLoadBalancerType + ]!.LoadBalancingConfig(); +} + +export function selectLbConfigFromList( + configs: LoadBalancingConfig[], + fallbackTodefault = false +): TypedLoadBalancingConfig | null { + for (const config of configs) { + try { + return parseLoadBalancingConfig(config); + } catch (e) { + log( + LogVerbosity.DEBUG, + 'Config parsing failed with error', + (e as Error).message + ); + continue; + } + } + if (fallbackTodefault) { + if (defaultLoadBalancerType) { + return new registeredLoadBalancerTypes[ + defaultLoadBalancerType + ]!.LoadBalancingConfig(); + } else { + return null; + } + } else { + return null; + } } diff --git a/packages/grpc-js/src/load-balancing-call.ts b/packages/grpc-js/src/load-balancing-call.ts new file mode 100644 index 000000000..764769753 --- /dev/null +++ b/packages/grpc-js/src/load-balancing-call.ts @@ -0,0 +1,378 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +import { CallCredentials } from './call-credentials'; +import { + Call, + DeadlineInfoProvider, + InterceptingListener, + MessageContext, + StatusObject, +} from './call-interface'; +import { SubchannelCall } from './subchannel-call'; +import { ConnectivityState } from './connectivity-state'; +import { LogVerbosity, Status } from './constants'; +import { Deadline, formatDateDifference, getDeadlineTimeoutString } from './deadline'; +import { InternalChannel } from './internal-channel'; +import { Metadata } from './metadata'; +import { PickResultType } from './picker'; +import { CallConfig } from './resolver'; +import { splitHostPort } from './uri-parser'; +import * as logging from './logging'; +import { restrictControlPlaneStatusCode } from './control-plane-status'; +import * as http2 from 'http2'; + +const TRACER_NAME = 'load_balancing_call'; + +export type RpcProgress = 'NOT_STARTED' | 'DROP' | 'REFUSED' | 'PROCESSED'; + +export interface StatusObjectWithProgress extends StatusObject { + progress: RpcProgress; +} + +export interface LoadBalancingCallInterceptingListener + extends InterceptingListener { + onReceiveStatus(status: StatusObjectWithProgress): void; +} + +export class LoadBalancingCall implements Call, DeadlineInfoProvider { + private child: SubchannelCall | null = null; + private readPending = false; + private pendingMessage: { context: MessageContext; message: Buffer } | null = + null; + private pendingHalfClose = false; + private ended = false; + private serviceUrl: string; + private metadata: Metadata | null = null; + private listener: InterceptingListener | null = null; + private onCallEnded: ((statusCode: Status) => void) | null = null; + private startTime: Date; + private childStartTime: Date | null = null; + constructor( + private readonly channel: InternalChannel, + private readonly callConfig: CallConfig, + private readonly methodName: string, + private readonly host: string, + private readonly credentials: CallCredentials, + private readonly deadline: Deadline, + private readonly callNumber: number + ) { + const splitPath: string[] = this.methodName.split('/'); + let serviceName = ''; + /* The standard path format is "/{serviceName}/{methodName}", so if we split + * by '/', the first item should be empty and the second should be the + * service name */ + if (splitPath.length >= 2) { + serviceName = splitPath[1]; + } + const hostname = splitHostPort(this.host)?.host ?? 
'localhost'; + /* Currently, call credentials are only allowed on HTTPS connections, so we + * can assume that the scheme is "https" */ + this.serviceUrl = `https://${hostname}/${serviceName}`; + this.startTime = new Date(); + } + getDeadlineInfo(): string[] { + const deadlineInfo: string[] = []; + if (this.childStartTime) { + if (this.childStartTime > this.startTime) { + if (this.metadata?.getOptions().waitForReady) { + deadlineInfo.push('wait_for_ready'); + } + deadlineInfo.push(`LB pick: ${formatDateDifference(this.startTime, this.childStartTime)}`); + } + deadlineInfo.push(...this.child!.getDeadlineInfo()); + return deadlineInfo; + } else { + if (this.metadata?.getOptions().waitForReady) { + deadlineInfo.push('wait_for_ready'); + } + deadlineInfo.push('Waiting for LB pick'); + } + return deadlineInfo; + } + + private trace(text: string): void { + logging.trace( + LogVerbosity.DEBUG, + TRACER_NAME, + '[' + this.callNumber + '] ' + text + ); + } + + private outputStatus(status: StatusObject, progress: RpcProgress) { + if (!this.ended) { + this.ended = true; + this.trace( + 'ended with status: code=' + + status.code + + ' details="' + + status.details + + '" start time=' + + this.startTime.toISOString() + ); + const finalStatus = { ...status, progress }; + this.listener?.onReceiveStatus(finalStatus); + this.onCallEnded?.(finalStatus.code); + } + } + + doPick() { + if (this.ended) { + return; + } + if (!this.metadata) { + throw new Error('doPick called before start'); + } + this.trace('Pick called'); + const finalMetadata = this.metadata.clone(); + const pickResult = this.channel.doPick( + finalMetadata, + this.callConfig.pickInformation + ); + const subchannelString = pickResult.subchannel + ? '(' + + pickResult.subchannel.getChannelzRef().id + + ') ' + + pickResult.subchannel.getAddress() + : '' + pickResult.subchannel; + this.trace( + 'Pick result: ' + + PickResultType[pickResult.pickResultType] + + ' subchannel: ' + + subchannelString + + ' status: ' + + pickResult.status?.code + + ' ' + + pickResult.status?.details + ); + switch (pickResult.pickResultType) { + case PickResultType.COMPLETE: + this.credentials + .generateMetadata({ service_url: this.serviceUrl }) + .then( + credsMetadata => { + /* If this call was cancelled (e.g. by the deadline) before + * metadata generation finished, we shouldn't do anything with + * it. */ + if (this.ended) { + this.trace( + 'Credentials metadata generation finished after call ended' + ); + return; + } + finalMetadata.merge(credsMetadata); + if (finalMetadata.get('authorization').length > 1) { + this.outputStatus( + { + code: Status.INTERNAL, + details: + '"authorization" metadata cannot have multiple values', + metadata: new Metadata(), + }, + 'PROCESSED' + ); + } + if ( + pickResult.subchannel!.getConnectivityState() !== + ConnectivityState.READY + ) { + this.trace( + 'Picked subchannel ' + + subchannelString + + ' has state ' + + ConnectivityState[ + pickResult.subchannel!.getConnectivityState() + ] + + ' after getting credentials metadata. 
Retrying pick' + ); + this.doPick(); + return; + } + + if (this.deadline !== Infinity) { + finalMetadata.set( + 'grpc-timeout', + getDeadlineTimeoutString(this.deadline) + ); + } + try { + this.child = pickResult + .subchannel!.getRealSubchannel() + .createCall(finalMetadata, this.host, this.methodName, { + onReceiveMetadata: metadata => { + this.trace('Received metadata'); + this.listener!.onReceiveMetadata(metadata); + }, + onReceiveMessage: message => { + this.trace('Received message'); + this.listener!.onReceiveMessage(message); + }, + onReceiveStatus: status => { + this.trace('Received status'); + if ( + status.rstCode === + http2.constants.NGHTTP2_REFUSED_STREAM + ) { + this.outputStatus(status, 'REFUSED'); + } else { + this.outputStatus(status, 'PROCESSED'); + } + }, + }); + this.childStartTime = new Date(); + } catch (error) { + this.trace( + 'Failed to start call on picked subchannel ' + + subchannelString + + ' with error ' + + (error as Error).message + ); + this.outputStatus( + { + code: Status.INTERNAL, + details: + 'Failed to start HTTP/2 stream with error ' + + (error as Error).message, + metadata: new Metadata(), + }, + 'NOT_STARTED' + ); + return; + } + this.callConfig.onCommitted?.(); + pickResult.onCallStarted?.(); + this.onCallEnded = pickResult.onCallEnded; + this.trace( + 'Created child call [' + this.child.getCallNumber() + ']' + ); + if (this.readPending) { + this.child.startRead(); + } + if (this.pendingMessage) { + this.child.sendMessageWithContext( + this.pendingMessage.context, + this.pendingMessage.message + ); + } + if (this.pendingHalfClose) { + this.child.halfClose(); + } + }, + (error: Error & { code: number }) => { + // We assume the error code isn't 0 (Status.OK) + const { code, details } = restrictControlPlaneStatusCode( + typeof error.code === 'number' ? error.code : Status.UNKNOWN, + `Getting metadata from plugin failed with error: ${error.message}` + ); + this.outputStatus( + { + code: code, + details: details, + metadata: new Metadata(), + }, + 'PROCESSED' + ); + } + ); + break; + case PickResultType.DROP: + const { code, details } = restrictControlPlaneStatusCode( + pickResult.status!.code, + pickResult.status!.details + ); + setImmediate(() => { + this.outputStatus( + { code, details, metadata: pickResult.status!.metadata }, + 'DROP' + ); + }); + break; + case PickResultType.TRANSIENT_FAILURE: + if (this.metadata.getOptions().waitForReady) { + this.channel.queueCallForPick(this); + } else { + const { code, details } = restrictControlPlaneStatusCode( + pickResult.status!.code, + pickResult.status!.details + ); + setImmediate(() => { + this.outputStatus( + { code, details, metadata: pickResult.status!.metadata }, + 'PROCESSED' + ); + }); + } + break; + case PickResultType.QUEUE: + this.channel.queueCallForPick(this); + } + } + + cancelWithStatus(status: Status, details: string): void { + this.trace( + 'cancelWithStatus code: ' + status + ' details: "' + details + '"' + ); + this.child?.cancelWithStatus(status, details); + this.outputStatus( + { code: status, details: details, metadata: new Metadata() }, + 'PROCESSED' + ); + } + getPeer(): string { + return this.child?.getPeer() ?? 
this.channel.getTarget(); + } + start( + metadata: Metadata, + listener: LoadBalancingCallInterceptingListener + ): void { + this.trace('start called'); + this.listener = listener; + this.metadata = metadata; + this.doPick(); + } + sendMessageWithContext(context: MessageContext, message: Buffer): void { + this.trace('write() called with message of length ' + message.length); + if (this.child) { + this.child.sendMessageWithContext(context, message); + } else { + this.pendingMessage = { context, message }; + } + } + startRead(): void { + this.trace('startRead called'); + if (this.child) { + this.child.startRead(); + } else { + this.readPending = true; + } + } + halfClose(): void { + this.trace('halfClose called'); + if (this.child) { + this.child.halfClose(); + } else { + this.pendingHalfClose = true; + } + } + setCredentials(credentials: CallCredentials): void { + throw new Error('Method not implemented.'); + } + + getCallNumber(): number { + return this.callNumber; + } +} diff --git a/packages/grpc-js/src/logging.ts b/packages/grpc-js/src/logging.ts index 71683dbf7..2279d3b65 100644 --- a/packages/grpc-js/src/logging.ts +++ b/packages/grpc-js/src/logging.ts @@ -16,11 +16,27 @@ */ import { LogVerbosity } from './constants'; +import { pid } from 'process'; -let _logger: Partial = console; +const clientVersion = require('../../package.json').version; + +const DEFAULT_LOGGER: Partial = { + error: (message?: any, ...optionalParams: any[]) => { + console.error('E ' + message, ...optionalParams); + }, + info: (message?: any, ...optionalParams: any[]) => { + console.error('I ' + message, ...optionalParams); + }, + debug: (message?: any, ...optionalParams: any[]) => { + console.error('D ' + message, ...optionalParams); + }, +}; + +let _logger: Partial = DEFAULT_LOGGER; let _logVerbosity: LogVerbosity = LogVerbosity.ERROR; -const verbosityString = process.env.GRPC_NODE_VERBOSITY ?? process.env.GRPC_VERBOSITY ?? ''; +const verbosityString = + process.env.GRPC_NODE_VERBOSITY ?? process.env.GRPC_VERBOSITY ?? ''; switch (verbosityString.toUpperCase()) { case 'DEBUG': @@ -53,19 +69,39 @@ export const setLoggerVerbosity = (verbosity: LogVerbosity): void => { // eslint-disable-next-line @typescript-eslint/no-explicit-any export const log = (severity: LogVerbosity, ...args: any[]): void => { - if (severity >= _logVerbosity && typeof _logger.error === 'function') { - _logger.error(...args); + let logFunction: typeof DEFAULT_LOGGER.error; + if (severity >= _logVerbosity) { + switch (severity) { + case LogVerbosity.DEBUG: + logFunction = _logger.debug; + break; + case LogVerbosity.INFO: + logFunction = _logger.info; + break; + case LogVerbosity.ERROR: + logFunction = _logger.error; + break; + } + /* Fall back to _logger.error when other methods are not available for + * compatibility with older behavior that always logged to _logger.error */ + if (!logFunction) { + logFunction = _logger.error; + } + if (logFunction) { + logFunction.bind(_logger)(...args); + } } }; -const tracersString = process.env.GRPC_NODE_TRACE ?? process.env.GRPC_TRACE ?? ''; +const tracersString = + process.env.GRPC_NODE_TRACE ?? process.env.GRPC_TRACE ?? 
''; const enabledTracers = new Set(); const disabledTracers = new Set(); for (const tracerName of tracersString.split(',')) { if (tracerName.startsWith('-')) { disabledTracers.add(tracerName.substring(1)); } else { - enabledTracers.add(tracerName) + enabledTracers.add(tracerName); } } const allEnabled = enabledTracers.has('all'); @@ -75,7 +111,24 @@ export function trace( tracer: string, text: string ): void { - if (!disabledTracers.has(tracer) && (allEnabled || enabledTracers.has(tracer))) { - log(severity, new Date().toISOString() + ' | ' + tracer + ' | ' + text); + if (isTracerEnabled(tracer)) { + log( + severity, + new Date().toISOString() + + ' | v' + + clientVersion + + ' ' + + pid + + ' | ' + + tracer + + ' | ' + + text + ); } } + +export function isTracerEnabled(tracer: string): boolean { + return ( + !disabledTracers.has(tracer) && (allEnabled || enabledTracers.has(tracer)) + ); +} diff --git a/packages/grpc-js/src/make-client.ts b/packages/grpc-js/src/make-client.ts index a6cb91007..10d1e959c 100644 --- a/packages/grpc-js/src/make-client.ts +++ b/packages/grpc-js/src/make-client.ts @@ -91,6 +91,7 @@ export interface ServiceClientConstructor { options?: Partial ): ServiceClient; service: ServiceDefinition; + serviceName: string; } /** @@ -98,7 +99,7 @@ export interface ServiceClientConstructor { * keys. * @param key key for check, string. */ -function isPrototypePolluted(key: string): Boolean { +function isPrototypePolluted(key: string): boolean { return ['__proto__', 'prototype', 'constructor'].includes(key); } @@ -127,10 +128,11 @@ export function makeClientConstructor( class ServiceClientImpl extends Client implements ServiceClient { static service: ServiceDefinition; + static serviceName: string; [methodName: string]: Function; } - Object.keys(methods).forEach((name) => { + Object.keys(methods).forEach(name => { if (isPrototypePolluted(name)) { return; } @@ -171,6 +173,7 @@ export function makeClientConstructor( }); ServiceClientImpl.service = methods; + ServiceClientImpl.serviceName = serviceName; return ServiceClientImpl; } diff --git a/packages/grpc-js/src/max-message-size-filter.ts b/packages/grpc-js/src/max-message-size-filter.ts deleted file mode 100644 index f820c02e6..000000000 --- a/packages/grpc-js/src/max-message-size-filter.ts +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -import { BaseFilter, Filter, FilterFactory } from "./filter"; -import { Call, WriteObject } from "./call-stream"; -import { Status, DEFAULT_MAX_SEND_MESSAGE_LENGTH, DEFAULT_MAX_RECEIVE_MESSAGE_LENGTH } from "./constants"; -import { ChannelOptions } from "./channel-options"; - -export class MaxMessageSizeFilter extends BaseFilter implements Filter { - private maxSendMessageSize: number = DEFAULT_MAX_SEND_MESSAGE_LENGTH; - private maxReceiveMessageSize: number = DEFAULT_MAX_RECEIVE_MESSAGE_LENGTH; - constructor( - private readonly options: ChannelOptions, - private readonly callStream: Call - ) { - super(); - if ('grpc.max_send_message_length' in options) { - this.maxSendMessageSize = options['grpc.max_send_message_length']!; - } - if ('grpc.max_receive_message_length' in options) { - this.maxReceiveMessageSize = options['grpc.max_receive_message_length']!; - } - } - - async sendMessage(message: Promise): Promise { - /* A configured size of -1 means that there is no limit, so skip the check - * entirely */ - if (this.maxSendMessageSize === -1) { - return message; - } else { - const concreteMessage = await message; - if (concreteMessage.message.length > this.maxSendMessageSize) { - this.callStream.cancelWithStatus(Status.RESOURCE_EXHAUSTED, `Sent message larger than max (${concreteMessage.message.length} vs. ${this.maxSendMessageSize})`); - return Promise.reject('Message too large'); - } else { - return concreteMessage; - } - } - } - - async receiveMessage(message: Promise): Promise { - /* A configured size of -1 means that there is no limit, so skip the check - * entirely */ - if (this.maxReceiveMessageSize === -1) { - return message; - } else { - const concreteMessage = await message; - if (concreteMessage.length > this.maxReceiveMessageSize) { - this.callStream.cancelWithStatus(Status.RESOURCE_EXHAUSTED, `Received message larger than max (${concreteMessage.length} vs. 
${this.maxReceiveMessageSize})`); - return Promise.reject('Message too large'); - } else { - return concreteMessage; - } - } - } -} - -export class MaxMessageSizeFilterFactory implements FilterFactory { - constructor(private readonly options: ChannelOptions) {} - - createFilter(callStream: Call): MaxMessageSizeFilter { - return new MaxMessageSizeFilter(this.options, callStream); - } -} diff --git a/packages/grpc-js/src/metadata.ts b/packages/grpc-js/src/metadata.ts index 4947bf0ad..eabd2dff4 100644 --- a/packages/grpc-js/src/metadata.ts +++ b/packages/grpc-js/src/metadata.ts @@ -18,6 +18,7 @@ import * as http2 from 'http2'; import { log } from './logging'; import { LogVerbosity } from './constants'; +import { getErrorMessage } from './error'; const LEGAL_KEY_REGEX = /^[0-9a-z_.-]+$/; const LEGAL_NON_BINARY_VALUE_REGEX = /^[ -~]*$/; @@ -48,13 +49,14 @@ function validate(key: string, value?: MetadataValue): void { if (!isLegalKey(key)) { throw new Error('Metadata key "' + key + '" contains illegal characters'); } + if (value !== null && value !== undefined) { if (isBinaryKey(key)) { - if (!(value instanceof Buffer)) { + if (!Buffer.isBuffer(value)) { throw new Error("keys that end with '-bin' must have Buffer values"); } } else { - if (value instanceof Buffer) { + if (Buffer.isBuffer(value)) { throw new Error( "keys that don't end with '-bin' must have String values" ); @@ -88,12 +90,8 @@ export class Metadata { protected internalRepr: MetadataObject = new Map(); private options: MetadataOptions; - constructor(options?: MetadataOptions) { - if (options === undefined) { - this.options = {}; - } else { - this.options = options; - } + constructor(options: MetadataOptions = {}) { + this.options = options; } /** @@ -120,9 +118,8 @@ export class Metadata { key = normalizeKey(key); validate(key, value); - const existingValue: MetadataValue[] | undefined = this.internalRepr.get( - key - ); + const existingValue: MetadataValue[] | undefined = + this.internalRepr.get(key); if (existingValue === undefined) { this.internalRepr.set(key, [value]); @@ -137,7 +134,7 @@ export class Metadata { */ remove(key: string): void { key = normalizeKey(key); - validate(key); + // validate(key); this.internalRepr.delete(key); } @@ -148,7 +145,7 @@ export class Metadata { */ get(key: string): MetadataValue[] { key = normalizeKey(key); - validate(key); + // validate(key); return this.internalRepr.get(key) || []; } @@ -160,12 +157,12 @@ export class Metadata { getMap(): { [key: string]: MetadataValue } { const result: { [key: string]: MetadataValue } = {}; - this.internalRepr.forEach((values, key) => { + for (const [key, values] of this.internalRepr) { if (values.length > 0) { const v = values[0]; - result[key] = v instanceof Buffer ? v.slice() : v; + result[key] = Buffer.isBuffer(v) ? Buffer.from(v) : v; } - }); + } return result; } @@ -177,9 +174,9 @@ export class Metadata { const newMetadata = new Metadata(this.options); const newInternalRepr = newMetadata.internalRepr; - this.internalRepr.forEach((value, key) => { - const clonedValue: MetadataValue[] = value.map((v) => { - if (v instanceof Buffer) { + for (const [key, value] of this.internalRepr) { + const clonedValue: MetadataValue[] = value.map(v => { + if (Buffer.isBuffer(v)) { return Buffer.from(v); } else { return v; @@ -187,7 +184,7 @@ export class Metadata { }); newInternalRepr.set(key, clonedValue); - }); + } return newMetadata; } @@ -200,13 +197,13 @@ export class Metadata { * @param other A Metadata object. 
*/ merge(other: Metadata): void { - other.internalRepr.forEach((values, key) => { + for (const [key, values] of other.internalRepr) { const mergedValue: MetadataValue[] = ( this.internalRepr.get(key) || [] ).concat(values); this.internalRepr.set(key, mergedValue); - }); + } } setOptions(options: MetadataOptions) { @@ -223,23 +220,26 @@ export class Metadata { toHttp2Headers(): http2.OutgoingHttpHeaders { // NOTE: Node <8.9 formats http2 headers incorrectly. const result: http2.OutgoingHttpHeaders = {}; - this.internalRepr.forEach((values, key) => { + + for (const [key, values] of this.internalRepr) { // We assume that the user's interaction with this object is limited to // through its public API (i.e. keys and values are already validated). - result[key] = values.map((value) => { - if (value instanceof Buffer) { - return value.toString('base64'); - } else { - return value; - } - }); - }); + result[key] = values.map(bufToString); + } + return result; } - // For compatibility with the other Metadata implementation - private _getCoreRepresentation() { - return this.internalRepr; + /** + * This modifies the behavior of JSON.stringify to show an object + * representation of the metadata map. + */ + toJSON() { + const result: { [key: string]: MetadataValue[] } = {}; + for (const [key, values] of this.internalRepr) { + result[key] = values; + } + return result; } /** @@ -249,10 +249,10 @@ export class Metadata { */ static fromHttp2Headers(headers: http2.IncomingHttpHeaders): Metadata { const result = new Metadata(); - Object.keys(headers).forEach((key) => { + for (const key of Object.keys(headers)) { // Reserved headers (beginning with `:`) are not valid keys. if (key.charAt(0) === ':') { - return; + continue; } const values = headers[key]; @@ -260,12 +260,12 @@ export class Metadata { try { if (isBinaryKey(key)) { if (Array.isArray(values)) { - values.forEach((value) => { + values.forEach(value => { result.add(key, Buffer.from(value, 'base64')); }); } else if (values !== undefined) { if (isCustomMetadata(key)) { - values.split(',').forEach((v) => { + values.split(',').forEach(v => { result.add(key, Buffer.from(v.trim(), 'base64')); }); } else { @@ -274,7 +274,7 @@ export class Metadata { } } else { if (Array.isArray(values)) { - values.forEach((value) => { + values.forEach(value => { result.add(key, value); }); } else if (values !== undefined) { @@ -282,10 +282,17 @@ export class Metadata { } } } catch (error) { - const message = `Failed to add metadata entry ${key}: ${values}. ${error.message}. For more information see https://github.com/grpc/grpc-node/issues/1173`; + const message = `Failed to add metadata entry ${key}: ${values}. ${getErrorMessage( + error + )}. For more information see https://github.com/grpc/grpc-node/issues/1173`; log(LogVerbosity.ERROR, message); } - }); + } + return result; } } + +const bufToString = (val: string | Buffer): string => { + return Buffer.isBuffer(val) ? 
val.toString('base64') : val; +}; diff --git a/packages/grpc-js/src/object-stream.ts b/packages/grpc-js/src/object-stream.ts index b17058a7a..49ef1f33b 100644 --- a/packages/grpc-js/src/object-stream.ts +++ b/packages/grpc-js/src/object-stream.ts @@ -15,7 +15,7 @@ * */ -import { Duplex, Readable, Writable } from 'stream'; +import { Readable, Writable } from 'stream'; import { EmitterAugmentation1 } from './events'; /* eslint-disable @typescript-eslint/no-explicit-any */ @@ -36,9 +36,16 @@ export interface IntermediateObjectWritable extends Writable { write(chunk: any & T, cb?: WriteCallback): boolean; write(chunk: any & T, encoding?: any, cb?: WriteCallback): boolean; setDefaultEncoding(encoding: string): this; - end(): void; - end(chunk: any & T, cb?: Function): void; - end(chunk: any & T, encoding?: any, cb?: Function): void; + end(): ReturnType extends Writable ? this : void; + end( + chunk: any & T, + cb?: Function + ): ReturnType extends Writable ? this : void; + end( + chunk: any & T, + encoding?: any, + cb?: Function + ): ReturnType extends Writable ? this : void; } export interface ObjectWritable extends IntermediateObjectWritable { @@ -46,20 +53,14 @@ export interface ObjectWritable extends IntermediateObjectWritable { write(chunk: T, cb?: Function): boolean; write(chunk: T, encoding?: any, cb?: Function): boolean; setDefaultEncoding(encoding: string): this; - end(): void; - end(chunk: T, cb?: Function): void; - end(chunk: T, encoding?: any, cb?: Function): void; + end(): ReturnType extends Writable ? this : void; + end( + chunk: T, + cb?: Function + ): ReturnType extends Writable ? this : void; + end( + chunk: T, + encoding?: any, + cb?: Function + ): ReturnType extends Writable ? this : void; } - -export type ObjectDuplex = { - read(size?: number): U; - - _write(chunk: T, encoding: string, callback: Function): void; - write(chunk: T, cb?: Function): boolean; - write(chunk: T, encoding?: any, cb?: Function): boolean; - end(): void; - end(chunk: T, cb?: Function): void; - end(chunk: T, encoding?: any, cb?: Function): void; -} & Duplex & - ObjectWritable & - ObjectReadable; diff --git a/packages/grpc-js/src/picker.ts b/packages/grpc-js/src/picker.ts index 6df61b59a..ac79c9fee 100644 --- a/packages/grpc-js/src/picker.ts +++ b/packages/grpc-js/src/picker.ts @@ -15,12 +15,11 @@ * */ -import { Subchannel } from './subchannel'; -import { StatusObject } from './call-stream'; +import { StatusObject } from './call-interface'; import { Metadata } from './metadata'; import { Status } from './constants'; import { LoadBalancer } from './load-balancer'; -import { FilterFactory, Filter } from './filter'; +import { SubchannelInterface } from './subchannel-interface'; export enum PickResultType { COMPLETE, @@ -36,56 +35,51 @@ export interface PickResult { * `pickResultType` is COMPLETE. If null, indicates that the call should be * dropped. */ - subchannel: Subchannel | null; + subchannel: SubchannelInterface | null; /** * The status object to end the call with. Populated if and only if * `pickResultType` is TRANSIENT_FAILURE. */ status: StatusObject | null; - /** - * Extra FilterFactory (can be multiple encapsulated in a FilterStackFactory) - * provided by the load balancer to be used with the call. For technical - * reasons filters from this factory will not see sendMetadata events. 
- */ - extraFilterFactory: FilterFactory | null; onCallStarted: (() => void) | null; + onCallEnded: ((statusCode: Status) => void) | null; } export interface CompletePickResult extends PickResult { pickResultType: PickResultType.COMPLETE; - subchannel: Subchannel | null; + subchannel: SubchannelInterface | null; status: null; - extraFilterFactory: FilterFactory | null; onCallStarted: (() => void) | null; + onCallEnded: ((statusCode: Status) => void) | null; } export interface QueuePickResult extends PickResult { pickResultType: PickResultType.QUEUE; subchannel: null; status: null; - extraFilterFactory: null; onCallStarted: null; + onCallEnded: null; } export interface TransientFailurePickResult extends PickResult { pickResultType: PickResultType.TRANSIENT_FAILURE; subchannel: null; status: StatusObject; - extraFilterFactory: null; onCallStarted: null; + onCallEnded: null; } export interface DropCallPickResult extends PickResult { pickResultType: PickResultType.DROP; subchannel: null; status: StatusObject; - extraFilterFactory: null; onCallStarted: null; + onCallEnded: null; } export interface PickArgs { metadata: Metadata; - extraPickInfo: {[key: string]: string}; + extraPickInfo: { [key: string]: string }; } /** @@ -103,24 +97,21 @@ export interface Picker { */ export class UnavailablePicker implements Picker { private status: StatusObject; - constructor(status?: StatusObject) { - if (status !== undefined) { - this.status = status; - } else { - this.status = { - code: Status.UNAVAILABLE, - details: 'No connection established', - metadata: new Metadata(), - }; - } + constructor(status?: Partial) { + this.status = { + code: Status.UNAVAILABLE, + details: 'No connection established', + metadata: new Metadata(), + ...status, + }; } pick(pickArgs: PickArgs): TransientFailurePickResult { return { pickResultType: PickResultType.TRANSIENT_FAILURE, subchannel: null, status: this.status, - extraFilterFactory: null, onCallStarted: null, + onCallEnded: null, }; } } @@ -131,25 +122,34 @@ export class UnavailablePicker implements Picker { * indicating that the pick should be tried again with the next `Picker`. Also * reports back to the load balancer that a connection should be established * once any pick is attempted. + * If the childPicker is provided, delegate to it instead of returning the + * hardcoded QUEUE pick result, but still calls exitIdle. */ export class QueuePicker { private calledExitIdle = false; // Constructed with a load balancer. 
Calls exitIdle on it the first time pick is called - constructor(private loadBalancer: LoadBalancer) {} + constructor( + private loadBalancer: LoadBalancer, + private childPicker?: Picker + ) {} - pick(pickArgs: PickArgs): QueuePickResult { + pick(pickArgs: PickArgs): PickResult { if (!this.calledExitIdle) { process.nextTick(() => { this.loadBalancer.exitIdle(); }); this.calledExitIdle = true; } - return { - pickResultType: PickResultType.QUEUE, - subchannel: null, - status: null, - extraFilterFactory: null, - onCallStarted: null, - }; + if (this.childPicker) { + return this.childPicker.pick(pickArgs); + } else { + return { + pickResultType: PickResultType.QUEUE, + subchannel: null, + status: null, + onCallStarted: null, + onCallEnded: null, + }; + } } } diff --git a/packages/grpc-js/src/resolver-dns.ts b/packages/grpc-js/src/resolver-dns.ts index 67f1f8c45..6463c2656 100644 --- a/packages/grpc-js/src/resolver-dns.ts +++ b/packages/grpc-js/src/resolver-dns.ts @@ -24,14 +24,15 @@ import * as dns from 'dns'; import * as util from 'util'; import { extractAndSelectServiceConfig, ServiceConfig } from './service-config'; import { Status } from './constants'; -import { StatusObject } from './call-stream'; +import { StatusObject } from './call-interface'; import { Metadata } from './metadata'; import * as logging from './logging'; import { LogVerbosity } from './constants'; -import { SubchannelAddress, TcpSubchannelAddress } from './subchannel'; +import { Endpoint, TcpSubchannelAddress } from './subchannel-address'; import { GrpcUri, uriToString, splitHostPort } from './uri-parser'; import { isIPv6, isIPv4 } from 'net'; import { ChannelOptions } from './channel-options'; +import { BackoffOptions, BackoffTimeout } from './backoff-timeout'; const TRACER_NAME = 'dns_resolver'; @@ -42,49 +43,39 @@ function trace(text: string): void { /** * The default TCP port to connect to if not explicitly specified in the target. */ -const DEFAULT_PORT = 443; +export const DEFAULT_PORT = 443; + +const DEFAULT_MIN_TIME_BETWEEN_RESOLUTIONS_MS = 30_000; const resolveTxtPromise = util.promisify(dns.resolveTxt); const dnsLookupPromise = util.promisify(dns.lookup); -/** - * Merge any number of arrays into a single alternating array - * @param arrays - */ -function mergeArrays(...arrays: T[][]): T[] { - const result: T[] = []; - for ( - let i = 0; - i < - Math.max.apply( - null, - arrays.map((array) => array.length) - ); - i++ - ) { - for (const array of arrays) { - if (i < array.length) { - result.push(array[i]); - } - } - } - return result; -} - /** * Resolver implementation that handles DNS names and IP addresses. */ class DnsResolver implements Resolver { - private readonly ipResult: SubchannelAddress[] | null; + private readonly ipResult: Endpoint[] | null; private readonly dnsHostname: string | null; private readonly port: number | null; + /** + * Minimum time between resolutions, measured as the time between starting + * successive resolution requests. Only applies to successful resolutions. + * Failures are handled by the backoff timer. 
+ */ + private readonly minTimeBetweenResolutionsMs: number; private pendingLookupPromise: Promise | null = null; private pendingTxtPromise: Promise | null = null; - private latestLookupResult: TcpSubchannelAddress[] | null = null; + private latestLookupResult: Endpoint[] | null = null; private latestServiceConfig: ServiceConfig | null = null; private latestServiceConfigError: StatusObject | null = null; private percentage: number; private defaultResolutionError: StatusObject; + private backoff: BackoffTimeout; + private continueResolving = false; + private nextResolutionTimer: NodeJS.Timeout; + private isNextResolutionTimerRunning = false; + private isServiceConfigEnabled = true; + private returnedIpResult = false; constructor( private target: GrpcUri, private listener: ResolverListener, @@ -100,8 +91,12 @@ class DnsResolver implements Resolver { if (isIPv4(hostPort.host) || isIPv6(hostPort.host)) { this.ipResult = [ { - host: hostPort.host, - port: hostPort.port ?? DEFAULT_PORT, + addresses: [ + { + host: hostPort.host, + port: hostPort.port ?? DEFAULT_PORT, + }, + ], }, ]; this.dnsHostname = null; @@ -114,11 +109,33 @@ class DnsResolver implements Resolver { } this.percentage = Math.random() * 100; + if (channelOptions['grpc.service_config_disable_resolution'] === 1) { + this.isServiceConfigEnabled = false; + } + this.defaultResolutionError = { code: Status.UNAVAILABLE, details: `Name resolution failed for target ${uriToString(this.target)}`, metadata: new Metadata(), }; + + const backoffOptions: BackoffOptions = { + initialDelay: channelOptions['grpc.initial_reconnect_backoff_ms'], + maxDelay: channelOptions['grpc.max_reconnect_backoff_ms'], + }; + + this.backoff = new BackoffTimeout(() => { + if (this.continueResolving) { + this.startResolutionWithBackoff(); + } + }, backoffOptions); + this.backoff.unref(); + + this.minTimeBetweenResolutionsMs = + channelOptions['grpc.dns_min_time_between_resolutions_ms'] ?? + DEFAULT_MIN_TIME_BETWEEN_RESOLUTIONS_MS; + this.nextResolutionTimer = setTimeout(() => {}, 0); + clearTimeout(this.nextResolutionTimer); } /** @@ -127,13 +144,26 @@ class DnsResolver implements Resolver { */ private startResolution() { if (this.ipResult !== null) { - trace('Returning IP address for target ' + uriToString(this.target)); - setImmediate(() => { - this.listener.onSuccessfulResolution(this.ipResult!, null, null, null, {}); - }); + if (!this.returnedIpResult) { + trace('Returning IP address for target ' + uriToString(this.target)); + setImmediate(() => { + this.listener.onSuccessfulResolution( + this.ipResult!, + null, + null, + null, + {} + ); + }); + this.returnedIpResult = true; + } + this.backoff.stop(); + this.backoff.reset(); + this.stopNextResolutionTimer(); return; } if (this.dnsHostname === null) { + trace('Failed to parse DNS address ' + uriToString(this.target)); setImmediate(() => { this.listener.onError({ code: Status.UNAVAILABLE, @@ -141,7 +171,12 @@ class DnsResolver implements Resolver { metadata: new Metadata(), }); }); + this.stopNextResolutionTimer(); } else { + if (this.pendingLookupPromise !== null) { + return; + } + trace('Looking up DNS hostname ' + this.dnsHostname); /* We clear out latestLookupResult here to ensure that it contains the * latest result since the last time we started resolving. That way, the * TXT resolution handler can use it, but only if it finishes second. 
We @@ -156,22 +191,23 @@ class DnsResolver implements Resolver { * error is indistinguishable from other kinds of errors */ this.pendingLookupPromise = dnsLookupPromise(hostname, { all: true }); this.pendingLookupPromise.then( - (addressList) => { + addressList => { + if (this.pendingLookupPromise === null) { + return; + } this.pendingLookupPromise = null; - const ip4Addresses: dns.LookupAddress[] = addressList.filter( - (addr) => addr.family === 4 - ); - const ip6Addresses: dns.LookupAddress[] = addressList.filter( - (addr) => addr.family === 6 + this.backoff.reset(); + this.backoff.stop(); + const subchannelAddresses: TcpSubchannelAddress[] = addressList.map( + addr => ({ host: addr.address, port: +this.port! }) ); - this.latestLookupResult = mergeArrays( - ip6Addresses, - ip4Addresses - ).map((addr) => ({ host: addr.address, port: +this.port! })); + this.latestLookupResult = subchannelAddresses.map(address => ({ + addresses: [address], + })); const allAddressesString: string = '[' + - this.latestLookupResult - .map((addr) => addr.host + ':' + addr.port) + subchannelAddresses + .map(addr => addr.host + ':' + addr.port) .join(',') + ']'; trace( @@ -196,7 +232,10 @@ class DnsResolver implements Resolver { {} ); }, - (err) => { + err => { + if (this.pendingLookupPromise === null) { + return; + } trace( 'Resolution error for target ' + uriToString(this.target) + @@ -204,18 +243,22 @@ class DnsResolver implements Resolver { (err as Error).message ); this.pendingLookupPromise = null; + this.stopNextResolutionTimer(); this.listener.onError(this.defaultResolutionError); } ); /* If there already is a still-pending TXT resolution, we can just use * that result when it comes in */ - if (this.pendingTxtPromise === null) { + if (this.isServiceConfigEnabled && this.pendingTxtPromise === null) { /* We handle the TXT query promise differently than the others because * the name resolution attempt as a whole is a success even if the TXT * lookup fails */ this.pendingTxtPromise = resolveTxtPromise(hostname); this.pendingTxtPromise.then( - (txtRecord) => { + txtRecord => { + if (this.pendingTxtPromise === null) { + return; + } this.pendingTxtPromise = null; try { this.latestServiceConfig = extractAndSelectServiceConfig( @@ -225,7 +268,9 @@ class DnsResolver implements Resolver { } catch (err) { this.latestServiceConfigError = { code: Status.UNAVAILABLE, - details: 'Parsing service config failed', + details: `Parsing service config failed with error ${ + (err as Error).message + }`, metadata: new Metadata(), }; } @@ -243,7 +288,7 @@ class DnsResolver implements Resolver { ); } }, - (err) => { + err => { /* If TXT lookup fails we should do nothing, which means that we * continue to use the result of the most recent successful lookup, * or the default null config object if there has never been a @@ -257,17 +302,72 @@ class DnsResolver implements Resolver { } } - updateResolution() { - trace('Resolution update requested for target ' + uriToString(this.target)); + private startNextResolutionTimer() { + clearTimeout(this.nextResolutionTimer); + this.nextResolutionTimer = setTimeout(() => { + this.stopNextResolutionTimer(); + if (this.continueResolving) { + this.startResolutionWithBackoff(); + } + }, this.minTimeBetweenResolutionsMs); + this.nextResolutionTimer.unref?.(); + this.isNextResolutionTimerRunning = true; + } + + private stopNextResolutionTimer() { + clearTimeout(this.nextResolutionTimer); + this.isNextResolutionTimerRunning = false; + } + + private startResolutionWithBackoff() { if 
(this.pendingLookupPromise === null) { + this.continueResolving = false; + this.backoff.runOnce(); + this.startNextResolutionTimer(); this.startResolution(); } } + updateResolution() { + /* If there is a pending lookup, just let it finish. Otherwise, if the + * nextResolutionTimer or backoff timer is running, set the + * continueResolving flag to resolve when whichever of those timers + * fires. Otherwise, start resolving immediately. */ + if (this.pendingLookupPromise === null) { + if (this.isNextResolutionTimerRunning || this.backoff.isRunning()) { + if (this.isNextResolutionTimerRunning) { + trace( + 'resolution update delayed by "min time between resolutions" rate limit' + ); + } else { + trace( + 'resolution update delayed by backoff timer until ' + + this.backoff.getEndTime().toISOString() + ); + } + this.continueResolving = true; + } else { + this.startResolutionWithBackoff(); + } + } + } + + /** + * Reset the resolver to the same state it had when it was created. In-flight + * DNS requests cannot be cancelled, but they are discarded and their results + * will be ignored. + */ destroy() { - /* Do nothing. There is not a practical way to cancel in-flight DNS - * requests, and after this function is called we can expect that - * updateResolution will not be called again. */ + this.continueResolving = false; + this.backoff.reset(); + this.backoff.stop(); + this.stopNextResolutionTimer(); + this.pendingLookupPromise = null; + this.pendingTxtPromise = null; + this.latestLookupResult = null; + this.latestServiceConfig = null; + this.latestServiceConfigError = null; + this.returnedIpResult = false; } /** diff --git a/packages/grpc-js/src/resolver-ip.ts b/packages/grpc-js/src/resolver-ip.ts index 5c9e29c46..8fed35bd1 100644 --- a/packages/grpc-js/src/resolver-ip.ts +++ b/packages/grpc-js/src/resolver-ip.ts @@ -14,14 +14,14 @@ * limitations under the License. 
*/ -import { isIPv4, isIPv6 } from "net"; -import { StatusObject } from "./call-stream"; -import { ChannelOptions } from "./channel-options"; -import { LogVerbosity, Status } from "./constants"; -import { Metadata } from "./metadata"; -import { registerResolver, Resolver, ResolverListener } from "./resolver"; -import { SubchannelAddress } from "./subchannel"; -import { GrpcUri, splitHostPort, uriToString } from "./uri-parser"; +import { isIPv4, isIPv6 } from 'net'; +import { StatusObject } from './call-interface'; +import { ChannelOptions } from './channel-options'; +import { LogVerbosity, Status } from './constants'; +import { Metadata } from './metadata'; +import { registerResolver, Resolver, ResolverListener } from './resolver'; +import { Endpoint, SubchannelAddress } from './subchannel-address'; +import { GrpcUri, splitHostPort, uriToString } from './uri-parser'; import * as logging from './logging'; const TRACER_NAME = 'ip_resolver'; @@ -39,10 +39,11 @@ const IPV6_SCHEME = 'ipv6'; const DEFAULT_PORT = 443; class IpResolver implements Resolver { - private addresses: SubchannelAddress[] = []; + private endpoints: Endpoint[] = []; private error: StatusObject | null = null; + private hasReturnedResult = false; constructor( - private target: GrpcUri, + target: GrpcUri, private listener: ResolverListener, channelOptions: ChannelOptions ) { @@ -52,7 +53,7 @@ class IpResolver implements Resolver { this.error = { code: Status.UNAVAILABLE, details: `Unrecognized scheme ${target.scheme} in IP resolver`, - metadata: new Metadata() + metadata: new Metadata(), }; return; } @@ -63,37 +64,49 @@ class IpResolver implements Resolver { this.error = { code: Status.UNAVAILABLE, details: `Failed to parse ${target.scheme} address ${path}`, - metadata: new Metadata() + metadata: new Metadata(), }; return; } - if ((target.scheme === IPV4_SCHEME && !isIPv4(hostPort.host)) || (target.scheme === IPV6_SCHEME && !isIPv6(hostPort.host))) { + if ( + (target.scheme === IPV4_SCHEME && !isIPv4(hostPort.host)) || + (target.scheme === IPV6_SCHEME && !isIPv6(hostPort.host)) + ) { this.error = { code: Status.UNAVAILABLE, details: `Failed to parse ${target.scheme} address ${path}`, - metadata: new Metadata() + metadata: new Metadata(), }; return; } addresses.push({ host: hostPort.host, - port: hostPort.port ?? DEFAULT_PORT + port: hostPort.port ?? DEFAULT_PORT, }); } - this.addresses = addresses; - trace('Parsed ' + target.scheme + ' address list ' + this.addresses); + this.endpoints = addresses.map(address => ({ addresses: [address] })); + trace('Parsed ' + target.scheme + ' address list ' + addresses); } updateResolution(): void { - process.nextTick(() => { - if (this.error) { - this.listener.onError(this.error) - } else { - this.listener.onSuccessfulResolution(this.addresses, null, null, null, {}); - } - }); + if (!this.hasReturnedResult) { + this.hasReturnedResult = true; + process.nextTick(() => { + if (this.error) { + this.listener.onError(this.error); + } else { + this.listener.onSuccessfulResolution( + this.endpoints, + null, + null, + null, + {} + ); + } + }); + } } destroy(): void { - // This resolver owns no resources, so we do nothing here. 
+ this.hasReturnedResult = false; } static getDefaultAuthority(target: GrpcUri): string { @@ -104,4 +117,4 @@ class IpResolver implements Resolver { export function setup() { registerResolver(IPV4_SCHEME, IpResolver); registerResolver(IPV6_SCHEME, IpResolver); -} \ No newline at end of file +} diff --git a/packages/grpc-js/src/resolver-uds.ts b/packages/grpc-js/src/resolver-uds.ts index 40502f113..4d84de9d5 100644 --- a/packages/grpc-js/src/resolver-uds.ts +++ b/packages/grpc-js/src/resolver-uds.ts @@ -15,12 +15,13 @@ */ import { Resolver, ResolverListener, registerResolver } from './resolver'; -import { SubchannelAddress } from './subchannel'; +import { Endpoint } from './subchannel-address'; import { GrpcUri } from './uri-parser'; import { ChannelOptions } from './channel-options'; class UdsResolver implements Resolver { - private addresses: SubchannelAddress[] = []; + private hasReturnedResult = false; + private endpoints: Endpoint[] = []; constructor( target: GrpcUri, private listener: ResolverListener, @@ -32,21 +33,24 @@ class UdsResolver implements Resolver { } else { path = target.path; } - this.addresses = [{ path }]; + this.endpoints = [{ addresses: [{ path }] }]; } updateResolution(): void { - process.nextTick( - this.listener.onSuccessfulResolution, - this.addresses, - null, - null, - null, - {} - ); + if (!this.hasReturnedResult) { + this.hasReturnedResult = true; + process.nextTick( + this.listener.onSuccessfulResolution, + this.endpoints, + null, + null, + null, + {} + ); + } } destroy() { - // This resolver owns no resources, so we do nothing here. + this.hasReturnedResult = false; } static getDefaultAuthority(target: GrpcUri): string { diff --git a/packages/grpc-js/src/resolver.ts b/packages/grpc-js/src/resolver.ts index 497f3dfa0..1c84c0490 100644 --- a/packages/grpc-js/src/resolver.ts +++ b/packages/grpc-js/src/resolver.ts @@ -16,21 +16,20 @@ */ import { MethodConfig, ServiceConfig } from './service-config'; -import * as resolver_dns from './resolver-dns'; -import * as resolver_uds from './resolver-uds'; -import * as resolver_ip from './resolver-ip'; -import { StatusObject } from './call-stream'; -import { SubchannelAddress } from './subchannel'; +import { StatusObject } from './call-interface'; +import { Endpoint } from './subchannel-address'; import { GrpcUri, uriToString } from './uri-parser'; import { ChannelOptions } from './channel-options'; import { Metadata } from './metadata'; import { Status } from './constants'; +import { Filter, FilterFactory } from './filter'; export interface CallConfig { methodConfig: MethodConfig; onCommitted?: () => void; - pickInformation: {[key: string]: string}; + pickInformation: { [key: string]: string }; status: Status; + dynamicFilterFactories: FilterFactory[]; } /** @@ -38,7 +37,7 @@ export interface CallConfig { * https://github.com/grpc/proposal/blob/master/A31-xds-timeout-support-and-config-selector.md#new-functionality-in-grpc */ export interface ConfigSelector { - (methodName: string, metadata: Metadata): CallConfig; + (methodName: string, metadata: Metadata, channelId: number): CallConfig; } /** @@ -56,7 +55,7 @@ export interface ResolverListener { * service configuration was invalid */ onSuccessfulResolution( - addressList: SubchannelAddress[], + addressList: Endpoint[], serviceConfig: ServiceConfig | null, serviceConfigError: StatusObject | null, configSelector: ConfigSelector | null, @@ -81,9 +80,12 @@ export interface Resolver { * called synchronously with the constructor or updateResolution. 
*/ updateResolution(): void; - + /** - * Destroy the resolver. Should be called when the owning channel shuts down. + * Discard all resources owned by the resolver. A later call to + * `updateResolution` should reinitialize those resources. No + * `ResolverListener` callbacks should be called after `destroy` is called + * until `updateResolution` is called again. */ destroy(): void; } @@ -176,9 +178,3 @@ export function mapUriDefaultScheme(target: GrpcUri): GrpcUri | null { } return target; } - -export function registerAll() { - resolver_dns.setup(); - resolver_uds.setup(); - resolver_ip.setup(); -} diff --git a/packages/grpc-js/src/resolving-call.ts b/packages/grpc-js/src/resolving-call.ts new file mode 100644 index 000000000..2c81e7883 --- /dev/null +++ b/packages/grpc-js/src/resolving-call.ts @@ -0,0 +1,364 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import { CallCredentials } from './call-credentials'; +import { + Call, + CallStreamOptions, + DeadlineInfoProvider, + InterceptingListener, + MessageContext, + StatusObject, +} from './call-interface'; +import { LogVerbosity, Propagate, Status } from './constants'; +import { + Deadline, + deadlineToString, + formatDateDifference, + getRelativeTimeout, + minDeadline, +} from './deadline'; +import { FilterStack, FilterStackFactory } from './filter-stack'; +import { InternalChannel } from './internal-channel'; +import { Metadata } from './metadata'; +import * as logging from './logging'; +import { restrictControlPlaneStatusCode } from './control-plane-status'; + +const TRACER_NAME = 'resolving_call'; + +export class ResolvingCall implements Call { + private child: (Call & DeadlineInfoProvider) | null = null; + private readPending = false; + private pendingMessage: { context: MessageContext; message: Buffer } | null = + null; + private pendingHalfClose = false; + private ended = false; + private readFilterPending = false; + private writeFilterPending = false; + private pendingChildStatus: StatusObject | null = null; + private metadata: Metadata | null = null; + private listener: InterceptingListener | null = null; + private deadline: Deadline; + private host: string; + private statusWatchers: ((status: StatusObject) => void)[] = []; + private deadlineTimer: NodeJS.Timeout = setTimeout(() => {}, 0); + private filterStack: FilterStack | null = null; + + private deadlineStartTime: Date | null = null; + private configReceivedTime: Date | null = null; + private childStartTime: Date | null = null; + + constructor( + private readonly channel: InternalChannel, + private readonly method: string, + options: CallStreamOptions, + private readonly filterStackFactory: FilterStackFactory, + private credentials: CallCredentials, + private callNumber: number + ) { + this.deadline = options.deadline; + this.host = options.host; + if (options.parentCall) { + if (options.flags & Propagate.CANCELLATION) { + options.parentCall.on('cancelled', () => { + this.cancelWithStatus(Status.CANCELLED, 'Cancelled 
by parent call'); + }); + } + if (options.flags & Propagate.DEADLINE) { + this.trace( + 'Propagating deadline from parent: ' + + options.parentCall.getDeadline() + ); + this.deadline = minDeadline( + this.deadline, + options.parentCall.getDeadline() + ); + } + } + this.trace('Created'); + this.runDeadlineTimer(); + } + + private trace(text: string): void { + logging.trace( + LogVerbosity.DEBUG, + TRACER_NAME, + '[' + this.callNumber + '] ' + text + ); + } + + private runDeadlineTimer() { + clearTimeout(this.deadlineTimer); + this.deadlineStartTime = new Date(); + this.trace('Deadline: ' + deadlineToString(this.deadline)); + const timeout = getRelativeTimeout(this.deadline); + if (timeout !== Infinity) { + this.trace('Deadline will be reached in ' + timeout + 'ms'); + const handleDeadline = () => { + if (!this.deadlineStartTime) { + this.cancelWithStatus(Status.DEADLINE_EXCEEDED, 'Deadline exceeded'); + return; + } + const deadlineInfo: string[] = []; + const deadlineEndTime = new Date(); + deadlineInfo.push(`Deadline exceeded after ${formatDateDifference(this.deadlineStartTime, deadlineEndTime)}`); + if (this.configReceivedTime) { + if (this.configReceivedTime > this.deadlineStartTime) { + deadlineInfo.push(`name resolution: ${formatDateDifference(this.deadlineStartTime, this.configReceivedTime)}`); + } + if (this.childStartTime) { + if (this.childStartTime > this.configReceivedTime) { + deadlineInfo.push(`metadata filters: ${formatDateDifference(this.configReceivedTime, this.childStartTime)}`); + } + } else { + deadlineInfo.push('waiting for metadata filters'); + } + } else { + deadlineInfo.push('waiting for name resolution'); + } + if (this.child) { + deadlineInfo.push(...this.child.getDeadlineInfo()); + } + this.cancelWithStatus(Status.DEADLINE_EXCEEDED, deadlineInfo.join(',')); + }; + if (timeout <= 0) { + process.nextTick(handleDeadline); + } else { + this.deadlineTimer = setTimeout(handleDeadline, timeout); + } + } + } + + private outputStatus(status: StatusObject) { + if (!this.ended) { + this.ended = true; + if (!this.filterStack) { + this.filterStack = this.filterStackFactory.createFilter(); + } + clearTimeout(this.deadlineTimer); + const filteredStatus = this.filterStack.receiveTrailers(status); + this.trace( + 'ended with status: code=' + + filteredStatus.code + + ' details="' + + filteredStatus.details + + '"' + ); + this.statusWatchers.forEach(watcher => watcher(filteredStatus)); + process.nextTick(() => { + this.listener?.onReceiveStatus(filteredStatus); + }); + } + } + + private sendMessageOnChild(context: MessageContext, message: Buffer): void { + if (!this.child) { + throw new Error('sendMessageOnChild called with child not populated'); + } + const child = this.child; + this.writeFilterPending = true; + this.filterStack!.sendMessage( + Promise.resolve({ message: message, flags: context.flags }) + ).then( + filteredMessage => { + this.writeFilterPending = false; + child.sendMessageWithContext(context, filteredMessage.message); + if (this.pendingHalfClose) { + child.halfClose(); + } + }, + (status: StatusObject) => { + this.cancelWithStatus(status.code, status.details); + } + ); + } + + getConfig(): void { + if (this.ended) { + return; + } + if (!this.metadata || !this.listener) { + throw new Error('getConfig called before start'); + } + const configResult = this.channel.getConfig(this.method, this.metadata); + if (configResult.type === 'NONE') { + this.channel.queueCallForConfig(this); + return; + } else if (configResult.type === 'ERROR') { + if 
(this.metadata.getOptions().waitForReady) { + this.channel.queueCallForConfig(this); + } else { + this.outputStatus(configResult.error); + } + return; + } + // configResult.type === 'SUCCESS' + this.configReceivedTime = new Date(); + const config = configResult.config; + if (config.status !== Status.OK) { + const { code, details } = restrictControlPlaneStatusCode( + config.status, + 'Failed to route call to method ' + this.method + ); + this.outputStatus({ + code: code, + details: details, + metadata: new Metadata(), + }); + return; + } + + if (config.methodConfig.timeout) { + const configDeadline = new Date(); + configDeadline.setSeconds( + configDeadline.getSeconds() + config.methodConfig.timeout.seconds + ); + configDeadline.setMilliseconds( + configDeadline.getMilliseconds() + + config.methodConfig.timeout.nanos / 1_000_000 + ); + this.deadline = minDeadline(this.deadline, configDeadline); + this.runDeadlineTimer(); + } + + this.filterStackFactory.push(config.dynamicFilterFactories); + this.filterStack = this.filterStackFactory.createFilter(); + this.filterStack.sendMetadata(Promise.resolve(this.metadata)).then( + filteredMetadata => { + this.child = this.channel.createInnerCall( + config, + this.method, + this.host, + this.credentials, + this.deadline + ); + this.trace('Created child [' + this.child.getCallNumber() + ']'); + this.childStartTime = new Date(); + this.child.start(filteredMetadata, { + onReceiveMetadata: metadata => { + this.trace('Received metadata'); + this.listener!.onReceiveMetadata( + this.filterStack!.receiveMetadata(metadata) + ); + }, + onReceiveMessage: message => { + this.trace('Received message'); + this.readFilterPending = true; + this.filterStack!.receiveMessage(message).then( + filteredMessage => { + this.trace('Finished filtering received message'); + this.readFilterPending = false; + this.listener!.onReceiveMessage(filteredMessage); + if (this.pendingChildStatus) { + this.outputStatus(this.pendingChildStatus); + } + }, + (status: StatusObject) => { + this.cancelWithStatus(status.code, status.details); + } + ); + }, + onReceiveStatus: status => { + this.trace('Received status'); + if (this.readFilterPending) { + this.pendingChildStatus = status; + } else { + this.outputStatus(status); + } + }, + }); + if (this.readPending) { + this.child.startRead(); + } + if (this.pendingMessage) { + this.sendMessageOnChild( + this.pendingMessage.context, + this.pendingMessage.message + ); + } else if (this.pendingHalfClose) { + this.child.halfClose(); + } + }, + (status: StatusObject) => { + this.outputStatus(status); + } + ); + } + + reportResolverError(status: StatusObject) { + if (this.metadata?.getOptions().waitForReady) { + this.channel.queueCallForConfig(this); + } else { + this.outputStatus(status); + } + } + cancelWithStatus(status: Status, details: string): void { + this.trace( + 'cancelWithStatus code: ' + status + ' details: "' + details + '"' + ); + this.child?.cancelWithStatus(status, details); + this.outputStatus({ + code: status, + details: details, + metadata: new Metadata(), + }); + } + getPeer(): string { + return this.child?.getPeer() ?? 
this.channel.getTarget(); + } + start(metadata: Metadata, listener: InterceptingListener): void { + this.trace('start called'); + this.metadata = metadata.clone(); + this.listener = listener; + this.getConfig(); + } + sendMessageWithContext(context: MessageContext, message: Buffer): void { + this.trace('write() called with message of length ' + message.length); + if (this.child) { + this.sendMessageOnChild(context, message); + } else { + this.pendingMessage = { context, message }; + } + } + startRead(): void { + this.trace('startRead called'); + if (this.child) { + this.child.startRead(); + } else { + this.readPending = true; + } + } + halfClose(): void { + this.trace('halfClose called'); + if (this.child && !this.writeFilterPending) { + this.child.halfClose(); + } else { + this.pendingHalfClose = true; + } + } + setCredentials(credentials: CallCredentials): void { + this.credentials = this.credentials.compose(credentials); + } + + addStatusWatcher(watcher: (status: StatusObject) => void) { + this.statusWatchers.push(watcher); + } + + getCallNumber(): number { + return this.callNumber; + } +} diff --git a/packages/grpc-js/src/resolving-load-balancer.ts b/packages/grpc-js/src/resolving-load-balancer.ts index 94dd8c4a9..72aef0dfd 100644 --- a/packages/grpc-js/src/resolving-load-balancer.ts +++ b/packages/grpc-js/src/resolving-load-balancer.ts @@ -18,25 +18,28 @@ import { ChannelControlHelper, LoadBalancer, - getFirstUsableConfig, - LoadBalancingConfig + TypedLoadBalancingConfig, + selectLbConfigFromList, } from './load-balancer'; -import { ServiceConfig, validateServiceConfig } from './service-config'; -import { ConnectivityState } from './channel'; +import { + MethodConfig, + ServiceConfig, + validateServiceConfig, +} from './service-config'; +import { ConnectivityState } from './connectivity-state'; import { ConfigSelector, createResolver, Resolver } from './resolver'; import { ServiceError } from './call'; import { Picker, UnavailablePicker, QueuePicker } from './picker'; -import { BackoffTimeout } from './backoff-timeout'; +import { BackoffOptions, BackoffTimeout } from './backoff-timeout'; import { Status } from './constants'; -import { StatusObject } from './call-stream'; +import { StatusObject } from './call-interface'; import { Metadata } from './metadata'; import * as logging from './logging'; import { LogVerbosity } from './constants'; -import { SubchannelAddress } from './subchannel'; +import { Endpoint } from './subchannel-address'; import { GrpcUri, uriToString } from './uri-parser'; import { ChildLoadBalancerHandler } from './load-balancer-child-handler'; import { ChannelOptions } from './channel-options'; -import { PickFirstLoadBalancingConfig } from './load-balancer-pick-first'; const TRACER_NAME = 'resolving_load_balancer'; @@ -44,36 +47,104 @@ function trace(text: string): void { logging.trace(LogVerbosity.DEBUG, TRACER_NAME, text); } -const DEFAULT_LOAD_BALANCER_NAME = 'pick_first'; +type NameMatchLevel = 'EMPTY' | 'SERVICE' | 'SERVICE_AND_METHOD'; + +/** + * Name match levels in order from most to least specific. This is the order in + * which searches will be performed. 
+ */ +const NAME_MATCH_LEVEL_ORDER: NameMatchLevel[] = [ + 'SERVICE_AND_METHOD', + 'SERVICE', + 'EMPTY', +]; + +function hasMatchingName( + service: string, + method: string, + methodConfig: MethodConfig, + matchLevel: NameMatchLevel +): boolean { + for (const name of methodConfig.name) { + switch (matchLevel) { + case 'EMPTY': + if (!name.service && !name.method) { + return true; + } + break; + case 'SERVICE': + if (name.service === service && !name.method) { + return true; + } + break; + case 'SERVICE_AND_METHOD': + if (name.service === service && name.method === method) { + return true; + } + } + } + return false; +} -function getDefaultConfigSelector(serviceConfig: ServiceConfig | null): ConfigSelector { - return function defaultConfigSelector(methodName: string, metadata: Metadata) { +function findMatchingConfig( + service: string, + method: string, + methodConfigs: MethodConfig[], + matchLevel: NameMatchLevel +): MethodConfig | null { + for (const config of methodConfigs) { + if (hasMatchingName(service, method, config, matchLevel)) { + return config; + } + } + return null; +} + +function getDefaultConfigSelector( + serviceConfig: ServiceConfig | null +): ConfigSelector { + return function defaultConfigSelector( + methodName: string, + metadata: Metadata + ) { const splitName = methodName.split('/').filter(x => x.length > 0); const service = splitName[0] ?? ''; const method = splitName[1] ?? ''; if (serviceConfig && serviceConfig.methodConfig) { - for (const methodConfig of serviceConfig.methodConfig) { - for (const name of methodConfig.name) { - if (name.service === service && (name.method === undefined || name.method === method)) { - return { - methodConfig: methodConfig, - pickInformation: {}, - status: Status.OK - }; - } + /* Check for the following in order, and return the first method + * config that matches: + * 1. A name that exactly matches the service and method + * 2. A name with no method set that matches the service + * 3. An empty name + */ + for (const matchLevel of NAME_MATCH_LEVEL_ORDER) { + const matchingConfig = findMatchingConfig( + service, + method, + serviceConfig.methodConfig, + matchLevel + ); + if (matchingConfig) { + return { + methodConfig: matchingConfig, + pickInformation: {}, + status: Status.OK, + dynamicFilterFactories: [], + }; } } } return { - methodConfig: {name: []}, + methodConfig: { name: [] }, pickInformation: {}, - status: Status.OK + status: Status.OK, + dynamicFilterFactories: [], }; - } + }; } export interface ResolutionCallback { - (configSelector: ConfigSelector): void; + (serviceConfig: ServiceConfig, configSelector: ConfigSelector): void; } export interface ResolutionFailureCallback { @@ -84,9 +155,9 @@ export class ResolvingLoadBalancer implements LoadBalancer { /** * The resolver class constructed for the target address. 
*/ - private innerResolver: Resolver; + private readonly innerResolver: Resolver; - private childLoadBalancer: ChildLoadBalancerHandler; + private readonly childLoadBalancer: ChildLoadBalancerHandler; private latestChildState: ConnectivityState = ConnectivityState.IDLE; private latestChildPicker: Picker = new QueuePicker(this); /** @@ -127,7 +198,7 @@ export class ResolvingLoadBalancer implements LoadBalancer { constructor( private readonly target: GrpcUri, private readonly channelControlHelper: ChannelControlHelper, - private readonly channelOptions: ChannelOptions, + channelOptions: ChannelOptions, private readonly onSuccessfulResolution: ResolutionCallback, private readonly onFailedResolution: ResolutionFailureCallback ) { @@ -141,38 +212,51 @@ export class ResolvingLoadBalancer implements LoadBalancer { methodConfig: [], }; } + this.updateState(ConnectivityState.IDLE, new QueuePicker(this)); - this.childLoadBalancer = new ChildLoadBalancerHandler({ - createSubchannel: channelControlHelper.createSubchannel.bind( - channelControlHelper - ), - requestReresolution: () => { - /* If the backoffTimeout is running, we're still backing off from - * making resolve requests, so we shouldn't make another one here. - * In that case, the backoff timer callback will call - * updateResolution */ - if (this.backoffTimeout.isRunning()) { - this.continueResolving = true; - } else { - this.updateResolution(); - } - }, - updateState: (newState: ConnectivityState, picker: Picker) => { - this.latestChildState = newState; - this.latestChildPicker = picker; - this.updateState(newState, picker); + this.childLoadBalancer = new ChildLoadBalancerHandler( + { + createSubchannel: + channelControlHelper.createSubchannel.bind(channelControlHelper), + requestReresolution: () => { + /* If the backoffTimeout is running, we're still backing off from + * making resolve requests, so we shouldn't make another one here. + * In that case, the backoff timer callback will call + * updateResolution */ + if (this.backoffTimeout.isRunning()) { + trace( + 'requestReresolution delayed by backoff timer until ' + + this.backoffTimeout.getEndTime().toISOString() + ); + this.continueResolving = true; + } else { + this.updateResolution(); + } + }, + updateState: (newState: ConnectivityState, picker: Picker) => { + this.latestChildState = newState; + this.latestChildPicker = picker; + this.updateState(newState, picker); + }, + addChannelzChild: + channelControlHelper.addChannelzChild.bind(channelControlHelper), + removeChannelzChild: + channelControlHelper.removeChannelzChild.bind(channelControlHelper), }, - }); + channelOptions + ); this.innerResolver = createResolver( target, { onSuccessfulResolution: ( - addressList: SubchannelAddress[], + endpointList: Endpoint[], serviceConfig: ServiceConfig | null, serviceConfigError: ServiceError | null, configSelector: ConfigSelector | null, attributes: { [key: string]: unknown } ) => { + this.backoffTimeout.stop(); + this.backoffTimeout.reset(); let workingServiceConfig: ServiceConfig | null = null; /* This first group of conditionals implements the algorithm described * in https://github.com/grpc/proposal/blob/master/A21-service-config-error-handling.md @@ -201,7 +285,10 @@ export class ResolvingLoadBalancer implements LoadBalancer { } const workingConfigList = workingServiceConfig?.loadBalancingConfig ?? 
[]; - const loadBalancingConfig = getFirstUsableConfig(workingConfigList, true); + const loadBalancingConfig = selectLbConfigFromList( + workingConfigList, + true + ); if (loadBalancingConfig === null) { // There were load balancing configs but none are supported. This counts as a resolution failure this.handleResolutionFailure({ @@ -213,12 +300,16 @@ export class ResolvingLoadBalancer implements LoadBalancer { return; } this.childLoadBalancer.updateAddressList( - addressList, + endpointList, loadBalancingConfig, attributes ); - const finalServiceConfig = workingServiceConfig ?? this.defaultServiceConfig; - this.onSuccessfulResolution(configSelector ?? getDefaultConfigSelector(finalServiceConfig)); + const finalServiceConfig = + workingServiceConfig ?? this.defaultServiceConfig; + this.onSuccessfulResolution( + finalServiceConfig, + configSelector ?? getDefaultConfigSelector(finalServiceConfig) + ); }, onError: (error: StatusObject) => { this.handleResolutionFailure(error); @@ -226,7 +317,10 @@ export class ResolvingLoadBalancer implements LoadBalancer { }, channelOptions ); - + const backoffOptions: BackoffOptions = { + initialDelay: channelOptions['grpc.initial_reconnect_backoff_ms'], + maxDelay: channelOptions['grpc.max_reconnect_backoff_ms'], + }; this.backoffTimeout = new BackoffTimeout(() => { if (this.continueResolving) { this.updateResolution(); @@ -234,15 +328,20 @@ export class ResolvingLoadBalancer implements LoadBalancer { } else { this.updateState(this.latestChildState, this.latestChildPicker); } - }); + }, backoffOptions); this.backoffTimeout.unref(); } private updateResolution() { this.innerResolver.updateResolution(); if (this.currentState === ConnectivityState.IDLE) { - this.updateState(ConnectivityState.CONNECTING, new QueuePicker(this)); + /* this.latestChildPicker is initialized as new QueuePicker(this), which + * is an appropriate value here if the child LB policy is unset. + * Otherwise, we want to delegate to the child here, in case that + * triggers something. 
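// Illustration (not part of the patch): roughly how the two channel options wired into
// backoffOptions above shape the re-resolution delay schedule. The 1.6 multiplier and
// ±20% jitter are the defaults from the gRPC connection backoff spec and are hardcoded
// here only for the sketch; the real BackoffTimeout class owns those values.
interface SketchBackoffOptions {
  initialDelay?: number; // 'grpc.initial_reconnect_backoff_ms'
  maxDelay?: number;     // 'grpc.max_reconnect_backoff_ms'
}

function* sketchBackoffSchedule(options: SketchBackoffOptions): Generator<number> {
  const multiplier = 1.6;
  const jitter = 0.2;
  const maxDelay = options.maxDelay ?? 120000;
  let nextDelay = options.initialDelay ?? 1000;
  while (true) {
    // Jitter the current delay by ±20%, then grow the base delay for the next round.
    yield Math.round(nextDelay * (1 + jitter * (Math.random() * 2 - 1)));
    nextDelay = Math.min(nextDelay * multiplier, maxDelay);
  }
}

// Example: the first few backoff delays for a channel created with
// { 'grpc.initial_reconnect_backoff_ms': 500, 'grpc.max_reconnect_backoff_ms': 10000 }
const sketchSchedule = sketchBackoffSchedule({ initialDelay: 500, maxDelay: 10000 });
console.log(sketchSchedule.next().value, sketchSchedule.next().value, sketchSchedule.next().value);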
*/ + this.updateState(ConnectivityState.CONNECTING, this.latestChildPicker); } + this.backoffTimeout.runOnce(); } private updateState(connectivityState: ConnectivityState, picker: Picker) { @@ -255,7 +354,7 @@ export class ResolvingLoadBalancer implements LoadBalancer { ); // Ensure that this.exitIdle() is called by the picker if (connectivityState === ConnectivityState.IDLE) { - picker = new QueuePicker(this); + picker = new QueuePicker(this, picker); } this.currentState = connectivityState; this.channelControlHelper.updateState(connectivityState, picker); @@ -269,25 +368,26 @@ export class ResolvingLoadBalancer implements LoadBalancer { ); this.onFailedResolution(error); } - this.backoffTimeout.runOnce(); } exitIdle() { - this.childLoadBalancer.exitIdle(); - if (this.currentState === ConnectivityState.IDLE) { + if ( + this.currentState === ConnectivityState.IDLE || + this.currentState === ConnectivityState.TRANSIENT_FAILURE + ) { if (this.backoffTimeout.isRunning()) { this.continueResolving = true; } else { this.updateResolution(); } - this.updateState(ConnectivityState.CONNECTING, new QueuePicker(this)); } + this.childLoadBalancer.exitIdle(); } updateAddressList( - addressList: SubchannelAddress[], - lbConfig: LoadBalancingConfig | null - ) { + endpointList: Endpoint[], + lbConfig: TypedLoadBalancingConfig | null + ): never { throw new Error('updateAddressList not supported on ResolvingLoadBalancer'); } @@ -299,7 +399,13 @@ export class ResolvingLoadBalancer implements LoadBalancer { destroy() { this.childLoadBalancer.destroy(); this.innerResolver.destroy(); - this.updateState(ConnectivityState.SHUTDOWN, new UnavailablePicker()); + this.backoffTimeout.reset(); + this.backoffTimeout.stop(); + this.latestChildState = ConnectivityState.IDLE; + this.latestChildPicker = new QueuePicker(this); + this.currentState = ConnectivityState.IDLE; + this.previousServiceConfig = null; + this.continueResolving = false; } getTypeName() { diff --git a/packages/grpc-js/src/retrying-call.ts b/packages/grpc-js/src/retrying-call.ts new file mode 100644 index 000000000..1c5ffaa4f --- /dev/null +++ b/packages/grpc-js/src/retrying-call.ts @@ -0,0 +1,842 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +import { CallCredentials } from './call-credentials'; +import { LogVerbosity, Status } from './constants'; +import { Deadline, formatDateDifference } from './deadline'; +import { Metadata } from './metadata'; +import { CallConfig } from './resolver'; +import * as logging from './logging'; +import { + Call, + DeadlineInfoProvider, + InterceptingListener, + MessageContext, + StatusObject, + WriteCallback, + WriteObject, +} from './call-interface'; +import { + LoadBalancingCall, + StatusObjectWithProgress, +} from './load-balancing-call'; +import { InternalChannel } from './internal-channel'; + +const TRACER_NAME = 'retrying_call'; + +export class RetryThrottler { + private tokens: number; + constructor( + private readonly maxTokens: number, + private readonly tokenRatio: number, + previousRetryThrottler?: RetryThrottler + ) { + if (previousRetryThrottler) { + /* When carrying over tokens from a previous config, rescale them to the + * new max value */ + this.tokens = + previousRetryThrottler.tokens * + (maxTokens / previousRetryThrottler.maxTokens); + } else { + this.tokens = maxTokens; + } + } + + addCallSucceeded() { + this.tokens = Math.max(this.tokens + this.tokenRatio, this.maxTokens); + } + + addCallFailed() { + this.tokens = Math.min(this.tokens - 1, 0); + } + + canRetryCall() { + return this.tokens > this.maxTokens / 2; + } +} + +export class MessageBufferTracker { + private totalAllocated = 0; + private allocatedPerCall: Map = new Map(); + + constructor(private totalLimit: number, private limitPerCall: number) {} + + allocate(size: number, callId: number): boolean { + const currentPerCall = this.allocatedPerCall.get(callId) ?? 0; + if ( + this.limitPerCall - currentPerCall < size || + this.totalLimit - this.totalAllocated < size + ) { + return false; + } + this.allocatedPerCall.set(callId, currentPerCall + size); + this.totalAllocated += size; + return true; + } + + free(size: number, callId: number) { + if (this.totalAllocated < size) { + throw new Error( + `Invalid buffer allocation state: call ${callId} freed ${size} > total allocated ${this.totalAllocated}` + ); + } + this.totalAllocated -= size; + const currentPerCall = this.allocatedPerCall.get(callId) ?? 0; + if (currentPerCall < size) { + throw new Error( + `Invalid buffer allocation state: call ${callId} freed ${size} > allocated for call ${currentPerCall}` + ); + } + this.allocatedPerCall.set(callId, currentPerCall - size); + } + + freeAll(callId: number) { + const currentPerCall = this.allocatedPerCall.get(callId) ?? 
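// Illustration (not part of the patch): the token-bucket behavior RetryThrottler is
// meant to provide, per the gRPC retry design (gRFC A6): a failure drains one token but
// never goes below zero, a success restores tokenRatio tokens but never exceeds
// maxTokens, and retries are only permitted while strictly more than half of the tokens
// remain. The maxTokens/tokenRatio values normally come from the service config's
// retryThrottling setting; the numbers below are made up.
class SketchRetryThrottler {
  private tokens: number;
  constructor(private readonly maxTokens: number, private readonly tokenRatio: number) {
    this.tokens = maxTokens;
  }
  onCallFailed() {
    this.tokens = Math.max(this.tokens - 1, 0); // floor at 0
  }
  onCallSucceeded() {
    this.tokens = Math.min(this.tokens + this.tokenRatio, this.maxTokens); // cap at maxTokens
  }
  canRetryCall(): boolean {
    return this.tokens > this.maxTokens / 2;
  }
}

const sketchThrottler = new SketchRetryThrottler(10, 0.1);
for (let i = 0; i < 5; i++) {
  sketchThrottler.onCallFailed();
}
// false: 5 of 10 tokens remain, which is not strictly more than half.
console.log(sketchThrottler.canRetryCall());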
0; + if (this.totalAllocated < currentPerCall) { + throw new Error( + `Invalid buffer allocation state: call ${callId} allocated ${currentPerCall} > total allocated ${this.totalAllocated}` + ); + } + this.totalAllocated -= currentPerCall; + this.allocatedPerCall.delete(callId); + } +} + +type UnderlyingCallState = 'ACTIVE' | 'COMPLETED'; + +interface UnderlyingCall { + state: UnderlyingCallState; + call: LoadBalancingCall; + nextMessageToSend: number; + startTime: Date; +} + +/** + * A retrying call can be in one of these states: + * RETRY: Retries are configured and new attempts may be sent + * HEDGING: Hedging is configured and new attempts may be sent + * TRANSPARENT_ONLY: Neither retries nor hedging are configured, and + * transparent retry attempts may still be sent + * COMMITTED: One attempt is committed, and no new attempts will be + * sent + */ +type RetryingCallState = 'RETRY' | 'HEDGING' | 'TRANSPARENT_ONLY' | 'COMMITTED'; + +/** + * The different types of objects that can be stored in the write buffer, with + * the following meanings: + * MESSAGE: This is a message to be sent. + * HALF_CLOSE: When this entry is reached, the calls should send a half-close. + * FREED: This slot previously contained a message that has been sent on all + * child calls and is no longer needed. + */ +type WriteBufferEntryType = 'MESSAGE' | 'HALF_CLOSE' | 'FREED'; + +/** + * Entry in the buffer of messages to send to the remote end. + */ +interface WriteBufferEntry { + entryType: WriteBufferEntryType; + /** + * Message to send. + * Only populated if entryType is MESSAGE. + */ + message?: WriteObject; + /** + * Callback to call after sending the message. + * Only populated if entryType is MESSAGE and the call is in the COMMITTED + * state. + */ + callback?: WriteCallback; + /** + * Indicates whether the message is allocated in the buffer tracker. Ignored + * if entryType is not MESSAGE. Should be the return value of + * bufferTracker.allocate. + */ + allocated: boolean; +} + +const PREVIONS_RPC_ATTEMPTS_METADATA_KEY = 'grpc-previous-rpc-attempts'; + +export class RetryingCall implements Call, DeadlineInfoProvider { + private state: RetryingCallState; + private listener: InterceptingListener | null = null; + private initialMetadata: Metadata | null = null; + private underlyingCalls: UnderlyingCall[] = []; + private writeBuffer: WriteBufferEntry[] = []; + /** + * The offset of message indices in the writeBuffer. For example, if + * writeBufferOffset is 10, message 10 is in writeBuffer[0] and message 15 + * is in writeBuffer[5]. + */ + private writeBufferOffset = 0; + /** + * Tracks whether a read has been started, so that we know whether to start + * reads on new child calls. This only matters for the first read, because + * once a message comes in the child call becomes committed and there will + * be no new child calls. 
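// Illustration (not part of the patch): how the MessageBufferTracker above enforces
// both the per-call and the total retry buffer limits. The limit values and call IDs
// are made up for the example.
const sketchTracker = new MessageBufferTracker(/* totalLimit */ 1024, /* limitPerCall */ 256);

console.log(sketchTracker.allocate(200, /* callId */ 1)); // true: within both limits
console.log(sketchTracker.allocate(100, 1));              // false: would push call 1 past its 256-byte limit
console.log(sketchTracker.allocate(200, 2));              // true: call 2 has its own per-call budget
sketchTracker.free(200, 1);                               // a buffered message of call 1 is no longer needed
sketchTracker.freeAll(2);                                 // call 2 ended: release whatever it still holds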
+ */ + private readStarted = false; + private transparentRetryUsed = false; + /** + * Number of attempts so far + */ + private attempts = 0; + private hedgingTimer: NodeJS.Timeout | null = null; + private committedCallIndex: number | null = null; + private initialRetryBackoffSec = 0; + private nextRetryBackoffSec = 0; + private startTime: Date; + constructor( + private readonly channel: InternalChannel, + private readonly callConfig: CallConfig, + private readonly methodName: string, + private readonly host: string, + private readonly credentials: CallCredentials, + private readonly deadline: Deadline, + private readonly callNumber: number, + private readonly bufferTracker: MessageBufferTracker, + private readonly retryThrottler?: RetryThrottler + ) { + if (callConfig.methodConfig.retryPolicy) { + this.state = 'RETRY'; + const retryPolicy = callConfig.methodConfig.retryPolicy; + this.nextRetryBackoffSec = this.initialRetryBackoffSec = Number( + retryPolicy.initialBackoff.substring( + 0, + retryPolicy.initialBackoff.length - 1 + ) + ); + } else if (callConfig.methodConfig.hedgingPolicy) { + this.state = 'HEDGING'; + } else { + this.state = 'TRANSPARENT_ONLY'; + } + this.startTime = new Date(); + } + getDeadlineInfo(): string[] { + if (this.underlyingCalls.length === 0) { + return []; + } + const deadlineInfo: string[] = []; + const latestCall = this.underlyingCalls[this.underlyingCalls.length - 1]; + if (this.underlyingCalls.length > 1) { + deadlineInfo.push(`previous attempts: ${this.underlyingCalls.length - 1}`); + } + if (latestCall.startTime > this.startTime) { + deadlineInfo.push(`time to current attempt start: ${formatDateDifference(this.startTime, latestCall.startTime)}`); + } + deadlineInfo.push(...latestCall.call.getDeadlineInfo()); + return deadlineInfo; + } + getCallNumber(): number { + return this.callNumber; + } + + private trace(text: string): void { + logging.trace( + LogVerbosity.DEBUG, + TRACER_NAME, + '[' + this.callNumber + '] ' + text + ); + } + + private reportStatus(statusObject: StatusObject) { + this.trace( + 'ended with status: code=' + + statusObject.code + + ' details="' + + statusObject.details + + '" start time=' + + this.startTime.toISOString() + ); + this.bufferTracker.freeAll(this.callNumber); + this.writeBufferOffset = this.writeBufferOffset + this.writeBuffer.length; + this.writeBuffer = []; + process.nextTick(() => { + // Explicitly construct status object to remove progress field + this.listener?.onReceiveStatus({ + code: statusObject.code, + details: statusObject.details, + metadata: statusObject.metadata, + }); + }); + } + + cancelWithStatus(status: Status, details: string): void { + this.trace( + 'cancelWithStatus code: ' + status + ' details: "' + details + '"' + ); + this.reportStatus({ code: status, details, metadata: new Metadata() }); + for (const { call } of this.underlyingCalls) { + call.cancelWithStatus(status, details); + } + } + getPeer(): string { + if (this.committedCallIndex !== null) { + return this.underlyingCalls[this.committedCallIndex].call.getPeer(); + } else { + return 'unknown'; + } + } + + private getBufferEntry(messageIndex: number): WriteBufferEntry { + return ( + this.writeBuffer[messageIndex - this.writeBufferOffset] ?? 
{ + entryType: 'FREED', + allocated: false, + } + ); + } + + private getNextBufferIndex() { + return this.writeBufferOffset + this.writeBuffer.length; + } + + private clearSentMessages() { + if (this.state !== 'COMMITTED') { + return; + } + const earliestNeededMessageIndex = + this.underlyingCalls[this.committedCallIndex!].nextMessageToSend; + for ( + let messageIndex = this.writeBufferOffset; + messageIndex < earliestNeededMessageIndex; + messageIndex++ + ) { + const bufferEntry = this.getBufferEntry(messageIndex); + if (bufferEntry.allocated) { + this.bufferTracker.free( + bufferEntry.message!.message.length, + this.callNumber + ); + } + } + this.writeBuffer = this.writeBuffer.slice( + earliestNeededMessageIndex - this.writeBufferOffset + ); + this.writeBufferOffset = earliestNeededMessageIndex; + } + + private commitCall(index: number) { + if (this.state === 'COMMITTED') { + return; + } + if (this.underlyingCalls[index].state === 'COMPLETED') { + return; + } + this.trace( + 'Committing call [' + + this.underlyingCalls[index].call.getCallNumber() + + '] at index ' + + index + ); + this.state = 'COMMITTED'; + this.committedCallIndex = index; + for (let i = 0; i < this.underlyingCalls.length; i++) { + if (i === index) { + continue; + } + if (this.underlyingCalls[i].state === 'COMPLETED') { + continue; + } + this.underlyingCalls[i].state = 'COMPLETED'; + this.underlyingCalls[i].call.cancelWithStatus( + Status.CANCELLED, + 'Discarded in favor of other hedged attempt' + ); + } + this.clearSentMessages(); + } + + private commitCallWithMostMessages() { + if (this.state === 'COMMITTED') { + return; + } + let mostMessages = -1; + let callWithMostMessages = -1; + for (const [index, childCall] of this.underlyingCalls.entries()) { + if ( + childCall.state === 'ACTIVE' && + childCall.nextMessageToSend > mostMessages + ) { + mostMessages = childCall.nextMessageToSend; + callWithMostMessages = index; + } + } + if (callWithMostMessages === -1) { + /* There are no active calls, disable retries to force the next call that + * is started to be committed. 
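// Illustration (not part of the patch): the index arithmetic behind getBufferEntry and
// clearSentMessages above. Message indices are global and never change; the array is
// trimmed from the front as entries become unneeded, and writeBufferOffset records how
// many entries have already been dropped. The message contents are placeholders.
let sketchOffset = 0;
let sketchBuffer: string[] = ['msg0', 'msg1', 'msg2', 'msg3'];

function sketchGetEntry(messageIndex: number): string | undefined {
  return sketchBuffer[messageIndex - sketchOffset];
}

// Suppose messages 0 and 1 have been sent on the committed call and can be dropped:
const sketchEarliestNeeded = 2;
sketchBuffer = sketchBuffer.slice(sketchEarliestNeeded - sketchOffset);
sketchOffset = sketchEarliestNeeded;

console.log(sketchGetEntry(2)); // 'msg2': same global index, now stored at array position 0
console.log(sketchGetEntry(0)); // undefined: already freed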
*/ + this.state = 'TRANSPARENT_ONLY'; + } else { + this.commitCall(callWithMostMessages); + } + } + + private isStatusCodeInList(list: (Status | string)[], code: Status) { + return list.some( + value => + value === code || + value.toString().toLowerCase() === Status[code].toLowerCase() + ); + } + + private getNextRetryBackoffMs() { + const retryPolicy = this.callConfig?.methodConfig.retryPolicy; + if (!retryPolicy) { + return 0; + } + const nextBackoffMs = Math.random() * this.nextRetryBackoffSec * 1000; + const maxBackoffSec = Number( + retryPolicy.maxBackoff.substring(0, retryPolicy.maxBackoff.length - 1) + ); + this.nextRetryBackoffSec = Math.min( + this.nextRetryBackoffSec * retryPolicy.backoffMultiplier, + maxBackoffSec + ); + return nextBackoffMs; + } + + private maybeRetryCall( + pushback: number | null, + callback: (retried: boolean) => void + ) { + if (this.state !== 'RETRY') { + callback(false); + return; + } + const retryPolicy = this.callConfig!.methodConfig.retryPolicy!; + if (this.attempts >= Math.min(retryPolicy.maxAttempts, 5)) { + callback(false); + return; + } + let retryDelayMs: number; + if (pushback === null) { + retryDelayMs = this.getNextRetryBackoffMs(); + } else if (pushback < 0) { + this.state = 'TRANSPARENT_ONLY'; + callback(false); + return; + } else { + retryDelayMs = pushback; + this.nextRetryBackoffSec = this.initialRetryBackoffSec; + } + setTimeout(() => { + if (this.state !== 'RETRY') { + callback(false); + return; + } + if (this.retryThrottler?.canRetryCall() ?? true) { + callback(true); + this.attempts += 1; + this.startNewAttempt(); + } + }, retryDelayMs); + } + + private countActiveCalls(): number { + let count = 0; + for (const call of this.underlyingCalls) { + if (call?.state === 'ACTIVE') { + count += 1; + } + } + return count; + } + + private handleProcessedStatus( + status: StatusObject, + callIndex: number, + pushback: number | null + ) { + switch (this.state) { + case 'COMMITTED': + case 'TRANSPARENT_ONLY': + this.commitCall(callIndex); + this.reportStatus(status); + break; + case 'HEDGING': + if ( + this.isStatusCodeInList( + this.callConfig!.methodConfig.hedgingPolicy!.nonFatalStatusCodes ?? 
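// Illustration (not part of the patch): the delay sequence getNextRetryBackoffMs above
// produces for a hypothetical retry policy. Backoff durations in the service config are
// proto Duration JSON strings such as '0.1s', so the trailing 's' is stripped before
// converting to a number, and each actual delay is drawn uniformly from [0, ceiling).
function sketchParseSeconds(duration: string): number {
  return Number(duration.substring(0, duration.length - 1));
}

const sketchInitialBackoff = '0.1s';
const sketchMaxBackoff = '1s';
const sketchMultiplier = 2;

let sketchNextBackoffSec = sketchParseSeconds(sketchInitialBackoff);
for (let attempt = 1; attempt <= 4; attempt++) {
  const ceilingMs = sketchNextBackoffSec * 1000;
  const delayMs = Math.random() * ceilingMs;
  console.log(`attempt ${attempt}: ceiling ${ceilingMs}ms, drew ${delayMs.toFixed(0)}ms`);
  sketchNextBackoffSec = Math.min(
    sketchNextBackoffSec * sketchMultiplier,
    sketchParseSeconds(sketchMaxBackoff)
  );
}
// Ceilings grow 100ms -> 200ms -> 400ms -> 800ms, then stay capped at 1000ms.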
+ [], + status.code + ) + ) { + this.retryThrottler?.addCallFailed(); + let delayMs: number; + if (pushback === null) { + delayMs = 0; + } else if (pushback < 0) { + this.state = 'TRANSPARENT_ONLY'; + this.commitCall(callIndex); + this.reportStatus(status); + return; + } else { + delayMs = pushback; + } + setTimeout(() => { + this.maybeStartHedgingAttempt(); + // If after trying to start a call there are no active calls, this was the last one + if (this.countActiveCalls() === 0) { + this.commitCall(callIndex); + this.reportStatus(status); + } + }, delayMs); + } else { + this.commitCall(callIndex); + this.reportStatus(status); + } + break; + case 'RETRY': + if ( + this.isStatusCodeInList( + this.callConfig!.methodConfig.retryPolicy!.retryableStatusCodes, + status.code + ) + ) { + this.retryThrottler?.addCallFailed(); + this.maybeRetryCall(pushback, retried => { + if (!retried) { + this.commitCall(callIndex); + this.reportStatus(status); + } + }); + } else { + this.commitCall(callIndex); + this.reportStatus(status); + } + break; + } + } + + private getPushback(metadata: Metadata): number | null { + const mdValue = metadata.get('grpc-retry-pushback-ms'); + if (mdValue.length === 0) { + return null; + } + try { + return parseInt(mdValue[0] as string); + } catch (e) { + return -1; + } + } + + private handleChildStatus( + status: StatusObjectWithProgress, + callIndex: number + ) { + if (this.underlyingCalls[callIndex].state === 'COMPLETED') { + return; + } + this.trace( + 'state=' + + this.state + + ' handling status with progress ' + + status.progress + + ' from child [' + + this.underlyingCalls[callIndex].call.getCallNumber() + + '] in state ' + + this.underlyingCalls[callIndex].state + ); + this.underlyingCalls[callIndex].state = 'COMPLETED'; + if (status.code === Status.OK) { + this.retryThrottler?.addCallSucceeded(); + this.commitCall(callIndex); + this.reportStatus(status); + return; + } + if (this.state === 'COMMITTED') { + this.reportStatus(status); + return; + } + const pushback = this.getPushback(status.metadata); + switch (status.progress) { + case 'NOT_STARTED': + // RPC never leaves the client, always safe to retry + this.startNewAttempt(); + break; + case 'REFUSED': + // RPC reaches the server library, but not the server application logic + if (this.transparentRetryUsed) { + this.handleProcessedStatus(status, callIndex, pushback); + } else { + this.transparentRetryUsed = true; + this.startNewAttempt(); + } + break; + case 'DROP': + this.commitCall(callIndex); + this.reportStatus(status); + break; + case 'PROCESSED': + this.handleProcessedStatus(status, callIndex, pushback); + break; + } + } + + private maybeStartHedgingAttempt() { + if (this.state !== 'HEDGING') { + return; + } + if (!this.callConfig.methodConfig.hedgingPolicy) { + return; + } + const hedgingPolicy = this.callConfig.methodConfig.hedgingPolicy; + if (this.attempts >= Math.min(hedgingPolicy.maxAttempts, 5)) { + return; + } + this.attempts += 1; + this.startNewAttempt(); + this.maybeStartHedgingTimer(); + } + + private maybeStartHedgingTimer() { + if (this.hedgingTimer) { + clearTimeout(this.hedgingTimer); + } + if (this.state !== 'HEDGING') { + return; + } + if (!this.callConfig.methodConfig.hedgingPolicy) { + return; + } + const hedgingPolicy = this.callConfig.methodConfig.hedgingPolicy; + if (this.attempts >= Math.min(hedgingPolicy.maxAttempts, 5)) { + return; + } + const hedgingDelayString = hedgingPolicy.hedgingDelay ?? 
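// Illustration (not part of the patch): the shape of service config entries that put
// the RETRY and HEDGING states above into play. Field names follow the gRPC retry
// design (gRFC A6); the service/method names and numbers are made up. A method config
// carries either a retryPolicy or a hedgingPolicy, never both, and the code above
// additionally caps attempts at min(maxAttempts, 5).
const sketchServiceConfig = {
  methodConfig: [
    {
      name: [{ service: 'EchoService', method: 'Echo' }],
      retryPolicy: {
        maxAttempts: 4,
        initialBackoff: '0.1s',
        maxBackoff: '1s',
        backoffMultiplier: 2,
        retryableStatusCodes: ['UNAVAILABLE'],
      },
    },
    {
      name: [{ service: 'EchoService', method: 'EchoStream' }],
      hedgingPolicy: {
        maxAttempts: 3,
        hedgingDelay: '0.5s',
        nonFatalStatusCodes: ['UNAVAILABLE', 'DEADLINE_EXCEEDED'],
      },
    },
  ],
  // Channel-wide throttle feeding the RetryThrottler above.
  retryThrottling: {
    maxTokens: 10,
    tokenRatio: 0.1,
  },
};
// Such a config is typically delivered by the resolver or supplied through the
// 'grpc.service_config' channel option as a JSON string.
console.log(JSON.stringify(sketchServiceConfig));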
'0s'; + const hedgingDelaySec = Number( + hedgingDelayString.substring(0, hedgingDelayString.length - 1) + ); + this.hedgingTimer = setTimeout(() => { + this.maybeStartHedgingAttempt(); + }, hedgingDelaySec * 1000); + this.hedgingTimer.unref?.(); + } + + private startNewAttempt() { + const child = this.channel.createLoadBalancingCall( + this.callConfig, + this.methodName, + this.host, + this.credentials, + this.deadline + ); + this.trace( + 'Created child call [' + + child.getCallNumber() + + '] for attempt ' + + this.attempts + ); + const index = this.underlyingCalls.length; + this.underlyingCalls.push({ + state: 'ACTIVE', + call: child, + nextMessageToSend: 0, + startTime: new Date() + }); + const previousAttempts = this.attempts - 1; + const initialMetadata = this.initialMetadata!.clone(); + if (previousAttempts > 0) { + initialMetadata.set( + PREVIONS_RPC_ATTEMPTS_METADATA_KEY, + `${previousAttempts}` + ); + } + let receivedMetadata = false; + child.start(initialMetadata, { + onReceiveMetadata: metadata => { + this.trace( + 'Received metadata from child [' + child.getCallNumber() + ']' + ); + this.commitCall(index); + receivedMetadata = true; + if (previousAttempts > 0) { + metadata.set( + PREVIONS_RPC_ATTEMPTS_METADATA_KEY, + `${previousAttempts}` + ); + } + if (this.underlyingCalls[index].state === 'ACTIVE') { + this.listener!.onReceiveMetadata(metadata); + } + }, + onReceiveMessage: message => { + this.trace( + 'Received message from child [' + child.getCallNumber() + ']' + ); + this.commitCall(index); + if (this.underlyingCalls[index].state === 'ACTIVE') { + this.listener!.onReceiveMessage(message); + } + }, + onReceiveStatus: status => { + this.trace( + 'Received status from child [' + child.getCallNumber() + ']' + ); + if (!receivedMetadata && previousAttempts > 0) { + status.metadata.set( + PREVIONS_RPC_ATTEMPTS_METADATA_KEY, + `${previousAttempts}` + ); + } + this.handleChildStatus(status, index); + }, + }); + this.sendNextChildMessage(index); + if (this.readStarted) { + child.startRead(); + } + } + + start(metadata: Metadata, listener: InterceptingListener): void { + this.trace('start called'); + this.listener = listener; + this.initialMetadata = metadata; + this.attempts += 1; + this.startNewAttempt(); + this.maybeStartHedgingTimer(); + } + + private handleChildWriteCompleted(childIndex: number) { + const childCall = this.underlyingCalls[childIndex]; + const messageIndex = childCall.nextMessageToSend; + this.getBufferEntry(messageIndex).callback?.(); + this.clearSentMessages(); + childCall.nextMessageToSend += 1; + this.sendNextChildMessage(childIndex); + } + + private sendNextChildMessage(childIndex: number) { + const childCall = this.underlyingCalls[childIndex]; + if (childCall.state === 'COMPLETED') { + return; + } + if (this.getBufferEntry(childCall.nextMessageToSend)) { + const bufferEntry = this.getBufferEntry(childCall.nextMessageToSend); + switch (bufferEntry.entryType) { + case 'MESSAGE': + childCall.call.sendMessageWithContext( + { + callback: error => { + // Ignore error + this.handleChildWriteCompleted(childIndex); + }, + }, + bufferEntry.message!.message + ); + break; + case 'HALF_CLOSE': + childCall.nextMessageToSend += 1; + childCall.call.halfClose(); + break; + case 'FREED': + // Should not be possible + break; + } + } + } + + sendMessageWithContext(context: MessageContext, message: Buffer): void { + this.trace('write() called with message of length ' + message.length); + const writeObj: WriteObject = { + message, + flags: context.flags, + }; + const 
messageIndex = this.getNextBufferIndex(); + const bufferEntry: WriteBufferEntry = { + entryType: 'MESSAGE', + message: writeObj, + allocated: this.bufferTracker.allocate(message.length, this.callNumber), + }; + this.writeBuffer.push(bufferEntry); + if (bufferEntry.allocated) { + context.callback?.(); + for (const [callIndex, call] of this.underlyingCalls.entries()) { + if ( + call.state === 'ACTIVE' && + call.nextMessageToSend === messageIndex + ) { + call.call.sendMessageWithContext( + { + callback: error => { + // Ignore error + this.handleChildWriteCompleted(callIndex); + }, + }, + message + ); + } + } + } else { + this.commitCallWithMostMessages(); + // commitCallWithMostMessages can fail if we are between ping attempts + if (this.committedCallIndex === null) { + return; + } + const call = this.underlyingCalls[this.committedCallIndex]; + bufferEntry.callback = context.callback; + if (call.state === 'ACTIVE' && call.nextMessageToSend === messageIndex) { + call.call.sendMessageWithContext( + { + callback: error => { + // Ignore error + this.handleChildWriteCompleted(this.committedCallIndex!); + }, + }, + message + ); + } + } + } + startRead(): void { + this.trace('startRead called'); + this.readStarted = true; + for (const underlyingCall of this.underlyingCalls) { + if (underlyingCall?.state === 'ACTIVE') { + underlyingCall.call.startRead(); + } + } + } + halfClose(): void { + this.trace('halfClose called'); + const halfCloseIndex = this.getNextBufferIndex(); + this.writeBuffer.push({ + entryType: 'HALF_CLOSE', + allocated: false, + }); + for (const call of this.underlyingCalls) { + if ( + call?.state === 'ACTIVE' && + call.nextMessageToSend === halfCloseIndex + ) { + call.nextMessageToSend += 1; + call.call.halfClose(); + } + } + } + setCredentials(newCredentials: CallCredentials): void { + throw new Error('Method not implemented.'); + } + getMethod(): string { + return this.methodName; + } + getHost(): string { + return this.host; + } +} diff --git a/packages/grpc-js/src/server-call.ts b/packages/grpc-js/src/server-call.ts index 7d3fe192b..ccc80a1aa 100644 --- a/packages/grpc-js/src/server-call.ts +++ b/packages/grpc-js/src/server-call.ts @@ -16,58 +16,15 @@ */ import { EventEmitter } from 'events'; -import * as http2 from 'http2'; import { Duplex, Readable, Writable } from 'stream'; -import { Deadline, StatusObject } from './call-stream'; -import { - Status, - DEFAULT_MAX_SEND_MESSAGE_LENGTH, - DEFAULT_MAX_RECEIVE_MESSAGE_LENGTH, - LogVerbosity, -} from './constants'; -import { Deserialize, Serialize } from './make-client'; +import { Status } from './constants'; +import type { Deserialize, Serialize } from './make-client'; import { Metadata } from './metadata'; -import { StreamDecoder } from './stream-decoder'; -import { ObjectReadable, ObjectWritable } from './object-stream'; -import { ChannelOptions } from './channel-options'; -import * as logging from './logging'; - -const TRACER_NAME = 'server_call'; - -function trace(text: string): void { - logging.trace(LogVerbosity.DEBUG, TRACER_NAME, text); -} - -interface DeadlineUnitIndexSignature { - [name: string]: number; -} - -const GRPC_ACCEPT_ENCODING_HEADER = 'grpc-accept-encoding'; -const GRPC_ENCODING_HEADER = 'grpc-encoding'; -const GRPC_MESSAGE_HEADER = 'grpc-message'; -const GRPC_STATUS_HEADER = 'grpc-status'; -const GRPC_TIMEOUT_HEADER = 'grpc-timeout'; -const DEADLINE_REGEX = /(\d{1,8})\s*([HMSmun])/; -const deadlineUnitsToMs: DeadlineUnitIndexSignature = { - H: 3600000, - M: 60000, - S: 1000, - m: 1, - u: 0.001, - n: 0.000001, 
-}; -const defaultResponseHeaders = { - // TODO(cjihrig): Remove these encoding headers from the default response - // once compression is integrated. - [GRPC_ACCEPT_ENCODING_HEADER]: 'identity', - [GRPC_ENCODING_HEADER]: 'identity', - [http2.constants.HTTP2_HEADER_STATUS]: http2.constants.HTTP_STATUS_OK, - [http2.constants.HTTP2_HEADER_CONTENT_TYPE]: 'application/grpc+proto', -}; -const defaultResponseOptions = { - waitForTrailers: true, -} as http2.ServerStreamResponseOptions; +import type { ObjectReadable, ObjectWritable } from './object-stream'; +import type { StatusObject, PartialStatusObject } from './call-interface'; +import type { Deadline } from './deadline'; +import type { ServerInterceptingCallInterface } from './server-interceptors'; export type ServerStatusResponse = Partial; @@ -79,39 +36,62 @@ export type ServerSurfaceCall = { getPeer(): string; sendMetadata(responseMetadata: Metadata): void; getDeadline(): Deadline; + getPath(): string; } & EventEmitter; export type ServerUnaryCall = ServerSurfaceCall & { request: RequestType; }; -export type ServerReadableStream< - RequestType, - ResponseType -> = ServerSurfaceCall & ObjectReadable; -export type ServerWritableStream< - RequestType, - ResponseType -> = ServerSurfaceCall & - ObjectWritable & { - request: RequestType; - end: (metadata?: Metadata) => void; - }; +export type ServerReadableStream = + ServerSurfaceCall & ObjectReadable; +export type ServerWritableStream = + ServerSurfaceCall & + ObjectWritable & { + request: RequestType; + end: (metadata?: Metadata) => void; + }; export type ServerDuplexStream = ServerSurfaceCall & ObjectReadable & ObjectWritable & { end: (metadata?: Metadata) => void }; -export class ServerUnaryCallImpl extends EventEmitter - implements ServerUnaryCall { +export function serverErrorToStatus( + error: ServerErrorResponse | ServerStatusResponse, + overrideTrailers?: Metadata | undefined +): PartialStatusObject { + const status: PartialStatusObject = { + code: Status.UNKNOWN, + details: 'message' in error ? error.message : 'Unknown Error', + metadata: overrideTrailers ?? error.metadata ?? 
null, + }; + + if ( + 'code' in error && + typeof error.code === 'number' && + Number.isInteger(error.code) + ) { + status.code = error.code; + + if ('details' in error && typeof error.details === 'string') { + status.details = error.details!; + } + } + return status; +} + +export class ServerUnaryCallImpl + extends EventEmitter + implements ServerUnaryCall +{ cancelled: boolean; constructor( - private call: Http2ServerCallStream, + private path: string, + private call: ServerInterceptingCallInterface, public metadata: Metadata, public request: RequestType ) { super(); this.cancelled = false; - this.call.setupSurfaceCall(this); } getPeer(): string { @@ -125,30 +105,29 @@ export class ServerUnaryCallImpl extends EventEmitter getDeadline(): Deadline { return this.call.getDeadline(); } + + getPath(): string { + return this.path; + } } export class ServerReadableStreamImpl extends Readable - implements ServerReadableStream { + implements ServerReadableStream +{ cancelled: boolean; constructor( - private call: Http2ServerCallStream, - public metadata: Metadata, - public deserialize: Deserialize + private path: string, + private call: ServerInterceptingCallInterface, + public metadata: Metadata ) { super({ objectMode: true }); this.cancelled = false; - this.call.setupSurfaceCall(this); - this.call.setupReadable(this); } _read(size: number) { - if (!this.call.consumeUnpushedMessages(this)) { - return; - } - - this.call.resume(); + this.call.startRead(); } getPeer(): string { @@ -162,27 +141,35 @@ export class ServerReadableStreamImpl getDeadline(): Deadline { return this.call.getDeadline(); } + + getPath(): string { + return this.path; + } } export class ServerWritableStreamImpl extends Writable - implements ServerWritableStream { + implements ServerWritableStream +{ cancelled: boolean; private trailingMetadata: Metadata; + private pendingStatus: PartialStatusObject = { + code: Status.OK, + details: 'OK', + }; constructor( - private call: Http2ServerCallStream, + private path: string, + private call: ServerInterceptingCallInterface, public metadata: Metadata, - public serialize: Serialize, public request: RequestType ) { super({ objectMode: true }); this.cancelled = false; this.trailingMetadata = new Metadata(); - this.call.setupSurfaceCall(this); - this.on('error', (err) => { - this.call.sendError(err); + this.on('error', err => { + this.pendingStatus = serverErrorToStatus(err); this.end(); }); } @@ -199,34 +186,25 @@ export class ServerWritableStreamImpl return this.call.getDeadline(); } + getPath(): string { + return this.path; + } + _write( chunk: ResponseType, encoding: string, // eslint-disable-next-line @typescript-eslint/no-explicit-any callback: (...args: any[]) => void ) { - try { - const response = this.call.serializeMessage(chunk); - - if (!this.call.write(response)) { - this.call.once('drain', callback); - return; - } - } catch (err) { - err.code = Status.INTERNAL; - this.emit('error', err); - } - - callback(); + this.call.sendMessage(chunk, callback); } _final(callback: Function): void { + callback(null); this.call.sendStatus({ - code: Status.OK, - details: 'OK', - metadata: this.trailingMetadata, + ...this.pendingStatus, + metadata: this.pendingStatus.metadata ?? 
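// Illustration (not part of the patch): what serverErrorToStatus above produces for the
// error objects handlers typically emit (for example through the stream 'error' path
// shown here). Metadata and status are real exports of the package; the error contents
// and metadata key are made up.
import { Metadata, status } from '@grpc/grpc-js';

const sketchTrailers = new Metadata();
sketchTrailers.set('example-failure-reason', 'name must not be empty'); // made-up key

const sketchError = Object.assign(new Error('name must not be empty'), {
  code: status.INVALID_ARGUMENT,
  details: 'name must not be empty',
  metadata: sketchTrailers,
});

// serverErrorToStatus(sketchError) yields
//   { code: status.INVALID_ARGUMENT, details: 'name must not be empty', metadata: sketchTrailers }
// whereas a plain Error without an integer `code` falls back to
//   { code: status.UNKNOWN, details: error.message, metadata: null }.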
this.trailingMetadata, }); - callback(null); } // eslint-disable-next-line @typescript-eslint/no-explicit-any @@ -235,29 +213,32 @@ export class ServerWritableStreamImpl this.trailingMetadata = metadata; } - super.end(); + return super.end(); } } -export class ServerDuplexStreamImpl extends Duplex - implements ServerDuplexStream { +export class ServerDuplexStreamImpl + extends Duplex + implements ServerDuplexStream +{ cancelled: boolean; private trailingMetadata: Metadata; + private pendingStatus: PartialStatusObject = { + code: Status.OK, + details: 'OK', + }; constructor( - private call: Http2ServerCallStream, - public metadata: Metadata, - public serialize: Serialize, - public deserialize: Deserialize + private path: string, + private call: ServerInterceptingCallInterface, + public metadata: Metadata ) { super({ objectMode: true }); this.cancelled = false; this.trailingMetadata = new Metadata(); - this.call.setupSurfaceCall(this); - this.call.setupReadable(this); - this.on('error', (err) => { - this.call.sendError(err); + this.on('error', err => { + this.pendingStatus = serverErrorToStatus(err); this.end(); }); } @@ -274,24 +255,41 @@ export class ServerDuplexStreamImpl extends Duplex return this.call.getDeadline(); } + getPath(): string { + return this.path; + } + + _read(size: number) { + this.call.startRead(); + } + + _write( + chunk: ResponseType, + encoding: string, + // eslint-disable-next-line @typescript-eslint/no-explicit-any + callback: (...args: any[]) => void + ) { + this.call.sendMessage(chunk, callback); + } + + _final(callback: Function): void { + callback(null); + this.call.sendStatus({ + ...this.pendingStatus, + metadata: this.pendingStatus.metadata ?? this.trailingMetadata, + }); + } + // eslint-disable-next-line @typescript-eslint/no-explicit-any end(metadata?: any) { if (metadata) { this.trailingMetadata = metadata; } - super.end(); + return super.end(); } } -ServerDuplexStreamImpl.prototype._read = - ServerReadableStreamImpl.prototype._read; -ServerDuplexStreamImpl.prototype._write = - ServerWritableStreamImpl.prototype._write; -ServerDuplexStreamImpl.prototype._final = - ServerWritableStreamImpl.prototype._final; -ServerDuplexStreamImpl.prototype.end = ServerWritableStreamImpl.prototype.end; - // Unary response callback signature. export type sendUnaryData = ( error: ServerErrorResponse | ServerStatusResponse | null, @@ -332,7 +330,7 @@ export interface UnaryHandler { func: handleUnaryCall; serialize: Serialize; deserialize: Deserialize; - type: HandlerType; + type: 'unary'; path: string; } @@ -340,7 +338,7 @@ export interface ClientStreamingHandler { func: handleClientStreamingCall; serialize: Serialize; deserialize: Deserialize; - type: HandlerType; + type: 'clientStream'; path: string; } @@ -348,7 +346,7 @@ export interface ServerStreamingHandler { func: handleServerStreamingCall; serialize: Serialize; deserialize: Deserialize; - type: HandlerType; + type: 'serverStream'; path: string; } @@ -356,7 +354,7 @@ export interface BidiStreamingHandler { func: handleBidiStreamingCall; serialize: Serialize; deserialize: Deserialize; - type: HandlerType; + type: 'bidi'; path: string; } @@ -367,452 +365,3 @@ export type Handler = | BidiStreamingHandler; export type HandlerType = 'bidi' | 'clientStream' | 'serverStream' | 'unary'; - -// Internal class that wraps the HTTP2 request. 
-export class Http2ServerCallStream< - RequestType, - ResponseType -> extends EventEmitter { - cancelled = false; - deadlineTimer: NodeJS.Timer = setTimeout(() => {}, 0); - private deadline: Deadline = Infinity; - private wantTrailers = false; - private metadataSent = false; - private canPush = false; - private isPushPending = false; - private bufferedMessages: Array = []; - private messagesToPush: Array = []; - private maxSendMessageSize: number = DEFAULT_MAX_SEND_MESSAGE_LENGTH; - private maxReceiveMessageSize: number = DEFAULT_MAX_RECEIVE_MESSAGE_LENGTH; - - constructor( - private stream: http2.ServerHttp2Stream, - private handler: Handler, - private options: ChannelOptions - ) { - super(); - - this.stream.once('error', (err: ServerErrorResponse) => { - /* We need an error handler to avoid uncaught error event exceptions, but - * there is nothing we can reasonably do here. Any error event should - * have a corresponding close event, which handles emitting the cancelled - * event. And the stream is now in a bad state, so we can't reasonably - * expect to be able to send an error over it. */ - }); - - this.stream.once('close', () => { - trace( - 'Request to method ' + - this.handler?.path + - ' stream closed with rstCode ' + - this.stream.rstCode - ); - this.cancelled = true; - this.emit('cancelled', 'cancelled'); - }); - - this.stream.on('drain', () => { - this.emit('drain'); - }); - - if ('grpc.max_send_message_length' in options) { - this.maxSendMessageSize = options['grpc.max_send_message_length']!; - } - if ('grpc.max_receive_message_length' in options) { - this.maxReceiveMessageSize = options['grpc.max_receive_message_length']!; - } - - // Clear noop timer - clearTimeout(this.deadlineTimer); - } - - private checkCancelled(): boolean { - /* In some cases the stream can become destroyed before the close event - * fires. That creates a race condition that this check works around */ - if (this.stream.destroyed || this.stream.closed) { - this.cancelled = true; - } - return this.cancelled; - } - - sendMetadata(customMetadata?: Metadata) { - if (this.checkCancelled()) { - return; - } - - if (this.metadataSent) { - return; - } - - this.metadataSent = true; - const custom = customMetadata ? customMetadata.toHttp2Headers() : null; - // TODO(cjihrig): Include compression headers. - const headers = Object.assign({}, defaultResponseHeaders, custom); - this.stream.respond(headers, defaultResponseOptions); - } - - receiveMetadata(headers: http2.IncomingHttpHeaders) { - const metadata = Metadata.fromHttp2Headers(headers); - - // TODO(cjihrig): Receive compression metadata. 
- - const timeoutHeader = metadata.get(GRPC_TIMEOUT_HEADER); - - if (timeoutHeader.length > 0) { - const match = timeoutHeader[0].toString().match(DEADLINE_REGEX); - - if (match === null) { - const err = new Error('Invalid deadline') as ServerErrorResponse; - err.code = Status.OUT_OF_RANGE; - this.sendError(err); - return; - } - - const timeout = (+match[1] * deadlineUnitsToMs[match[2]]) | 0; - - const now = new Date(); - this.deadline = now.setMilliseconds(now.getMilliseconds() + timeout); - this.deadlineTimer = setTimeout(handleExpiredDeadline, timeout, this); - metadata.remove(GRPC_TIMEOUT_HEADER); - } - - // Remove several headers that should not be propagated to the application - metadata.remove(http2.constants.HTTP2_HEADER_ACCEPT_ENCODING); - metadata.remove(http2.constants.HTTP2_HEADER_TE); - metadata.remove(http2.constants.HTTP2_HEADER_CONTENT_TYPE); - metadata.remove('grpc-encoding'); - metadata.remove('grpc-accept-encoding'); - - return metadata; - } - - receiveUnaryMessage(): Promise { - return new Promise((resolve, reject) => { - const stream = this.stream; - const chunks: Buffer[] = []; - let totalLength = 0; - - stream.on('data', (data: Buffer) => { - chunks.push(data); - totalLength += data.byteLength; - }); - - stream.once('end', async () => { - try { - const requestBytes = Buffer.concat(chunks, totalLength); - if ( - this.maxReceiveMessageSize !== -1 && - requestBytes.length > this.maxReceiveMessageSize - ) { - this.sendError({ - code: Status.RESOURCE_EXHAUSTED, - details: `Received message larger than max (${requestBytes.length} vs. ${this.maxReceiveMessageSize})`, - }); - resolve(); - } - - resolve(this.deserializeMessage(requestBytes)); - } catch (err) { - err.code = Status.INTERNAL; - this.sendError(err); - resolve(); - } - }); - }); - } - - serializeMessage(value: ResponseType) { - const messageBuffer = this.handler.serialize(value); - - // TODO(cjihrig): Call compression aware serializeMessage(). - const byteLength = messageBuffer.byteLength; - const output = Buffer.allocUnsafe(byteLength + 5); - output.writeUInt8(0, 0); - output.writeUInt32BE(byteLength, 1); - messageBuffer.copy(output, 5); - return output; - } - - deserializeMessage(bytes: Buffer) { - // TODO(cjihrig): Call compression aware deserializeMessage(). 
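// Illustration (not part of the patch): how the grpc-timeout header values parsed above
// map to milliseconds. The regex and unit table are the same ones this patch
// re-introduces for the server interceptor code further down.
const sketchDeadlineRegex = /(\d{1,8})\s*([HMSmun])/;
const sketchUnitsToMs: { [unit: string]: number } = {
  H: 3600000,  // hours
  M: 60000,    // minutes
  S: 1000,     // seconds
  m: 1,        // milliseconds
  u: 0.001,    // microseconds
  n: 0.000001, // nanoseconds
};

function sketchTimeoutToMs(header: string): number | null {
  const match = header.match(sketchDeadlineRegex);
  if (match === null) {
    return null; // the server rejects such a request with OUT_OF_RANGE
  }
  return (+match[1] * sketchUnitsToMs[match[2]]) | 0; // same truncation as above
}

console.log(sketchTimeoutToMs('100m'));    // 100   (100 milliseconds)
console.log(sketchTimeoutToMs('5S'));      // 5000  (5 seconds)
console.log(sketchTimeoutToMs('500000u')); // 500   (500000 microseconds)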
- const receivedMessage = bytes.slice(5); - - return this.handler.deserialize(receivedMessage); - } - - async sendUnaryMessage( - err: ServerErrorResponse | ServerStatusResponse | null, - value?: ResponseType | null, - metadata?: Metadata, - flags?: number - ) { - if (this.checkCancelled()) { - return; - } - if (!metadata) { - metadata = new Metadata(); - } - - if (err) { - if (!Object.prototype.hasOwnProperty.call(err, 'metadata')) { - err.metadata = metadata; - } - this.sendError(err); - return; - } - - try { - const response = this.serializeMessage(value!); - - this.write(response); - this.sendStatus({ code: Status.OK, details: 'OK', metadata }); - } catch (err) { - err.code = Status.INTERNAL; - this.sendError(err); - } - } - - sendStatus(statusObj: StatusObject) { - if (this.checkCancelled()) { - return; - } - - trace( - 'Request to method ' + - this.handler?.path + - ' ended with status code: ' + - Status[statusObj.code] + - ' details: ' + - statusObj.details - ); - - clearTimeout(this.deadlineTimer); - - if (!this.wantTrailers) { - this.wantTrailers = true; - this.stream.once('wantTrailers', () => { - const trailersToSend = Object.assign( - { - [GRPC_STATUS_HEADER]: statusObj.code, - [GRPC_MESSAGE_HEADER]: encodeURI(statusObj.details as string), - }, - statusObj.metadata.toHttp2Headers() - ); - - this.stream.sendTrailers(trailersToSend); - }); - this.sendMetadata(); - this.stream.end(); - } - } - - sendError(error: ServerErrorResponse | ServerStatusResponse) { - if (this.checkCancelled()) { - return; - } - const status: StatusObject = { - code: Status.UNKNOWN, - details: 'message' in error ? error.message : 'Unknown Error', - metadata: - 'metadata' in error && error.metadata !== undefined - ? error.metadata - : new Metadata(), - }; - - if ( - 'code' in error && - typeof error.code === 'number' && - Number.isInteger(error.code) - ) { - status.code = error.code; - - if ('details' in error && typeof error.details === 'string') { - status.details = error.details!; - } - } - - this.sendStatus(status); - } - - write(chunk: Buffer) { - if (this.checkCancelled()) { - return; - } - - if ( - this.maxSendMessageSize !== -1 && - chunk.length > this.maxSendMessageSize - ) { - this.sendError({ - code: Status.RESOURCE_EXHAUSTED, - details: `Sent message larger than max (${chunk.length} vs. ${this.maxSendMessageSize})`, - }); - return; - } - - this.sendMetadata(); - return this.stream.write(chunk); - } - - resume() { - this.stream.resume(); - } - - setupSurfaceCall(call: ServerSurfaceCall) { - this.once('cancelled', (reason) => { - call.cancelled = true; - call.emit('cancelled', reason); - }); - } - - setupReadable( - readable: - | ServerReadableStream - | ServerDuplexStream - ) { - const decoder = new StreamDecoder(); - - this.stream.on('data', async (data: Buffer) => { - const messages = decoder.write(data); - - for (const message of messages) { - if ( - this.maxReceiveMessageSize !== -1 && - message.length > this.maxReceiveMessageSize - ) { - this.sendError({ - code: Status.RESOURCE_EXHAUSTED, - details: `Received message larger than max (${message.length} vs. 
${this.maxReceiveMessageSize})`, - }); - return; - } - this.pushOrBufferMessage(readable, message); - } - }); - - this.stream.once('end', () => { - this.pushOrBufferMessage(readable, null); - }); - } - - consumeUnpushedMessages( - readable: - | ServerReadableStream - | ServerDuplexStream - ): boolean { - this.canPush = true; - - while (this.messagesToPush.length > 0) { - const nextMessage = this.messagesToPush.shift(); - const canPush = readable.push(nextMessage); - - if (nextMessage === null || canPush === false) { - this.canPush = false; - break; - } - } - - return this.canPush; - } - - private pushOrBufferMessage( - readable: - | ServerReadableStream - | ServerDuplexStream, - messageBytes: Buffer | null - ): void { - if (this.isPushPending) { - this.bufferedMessages.push(messageBytes); - } else { - this.pushMessage(readable, messageBytes); - } - } - - private async pushMessage( - readable: - | ServerReadableStream - | ServerDuplexStream, - messageBytes: Buffer | null - ) { - if (messageBytes === null) { - if (this.canPush) { - readable.push(null); - } else { - this.messagesToPush.push(null); - } - - return; - } - - this.isPushPending = true; - - try { - const deserialized = await this.deserializeMessage(messageBytes); - - if (this.canPush) { - if (!readable.push(deserialized)) { - this.canPush = false; - this.stream.pause(); - } - } else { - this.messagesToPush.push(deserialized); - } - } catch (error) { - // Ignore any remaining messages when errors occur. - this.bufferedMessages.length = 0; - - if ( - !( - 'code' in error && - typeof error.code === 'number' && - Number.isInteger(error.code) && - error.code >= Status.OK && - error.code <= Status.UNAUTHENTICATED - ) - ) { - // The error code is not a valid gRPC code so its being overwritten. - error.code = Status.INTERNAL; - } - - readable.emit('error', error); - } - - this.isPushPending = false; - - if (this.bufferedMessages.length > 0) { - this.pushMessage( - readable, - this.bufferedMessages.shift() as Buffer | null - ); - } - } - - getPeer(): string { - const socket = this.stream.session.socket; - if (socket.remoteAddress) { - if (socket.remotePort) { - return `${socket.remoteAddress}:${socket.remotePort}`; - } else { - return socket.remoteAddress; - } - } else { - return 'unknown'; - } - } - - getDeadline(): Deadline { - return this.deadline; - } -} - -/* eslint-disable @typescript-eslint/no-explicit-any */ -type UntypedServerCall = Http2ServerCallStream; - -function handleExpiredDeadline(call: UntypedServerCall) { - const err = new Error('Deadline exceeded') as ServerErrorResponse; - err.code = Status.DEADLINE_EXCEEDED; - - call.sendError(err); - call.cancelled = true; - call.emit('cancelled', 'deadline'); -} diff --git a/packages/grpc-js/src/server-credentials.ts b/packages/grpc-js/src/server-credentials.ts index 17ab29805..0dd5f8cae 100644 --- a/packages/grpc-js/src/server-credentials.ts +++ b/packages/grpc-js/src/server-credentials.ts @@ -26,6 +26,7 @@ export interface KeyCertPair { export abstract class ServerCredentials { abstract _isSecure(): boolean; abstract _getSettings(): SecureServerOptions | null; + abstract _equals(other: ServerCredentials): boolean; static createInsecure(): ServerCredentials { return new InsecureServerCredentials(); @@ -48,8 +49,8 @@ export abstract class ServerCredentials { throw new TypeError('checkClientCertificate must be a boolean'); } - const cert = []; - const key = []; + const cert: Buffer[] = []; + const key: Buffer[] = []; for (let i = 0; i < keyCertPairs.length; i++) { const pair = 
keyCertPairs[i]; @@ -71,7 +72,7 @@ export abstract class ServerCredentials { } return new SecureServerCredentials({ - ca: rootCerts || getDefaultRootsData() || undefined, + ca: rootCerts ?? getDefaultRootsData() ?? undefined, cert, key, requestCert: checkClientCertificate, @@ -88,6 +89,10 @@ class InsecureServerCredentials extends ServerCredentials { _getSettings(): null { return null; } + + _equals(other: ServerCredentials): boolean { + return other instanceof InsecureServerCredentials; + } } class SecureServerCredentials extends ServerCredentials { @@ -105,4 +110,82 @@ class SecureServerCredentials extends ServerCredentials { _getSettings(): SecureServerOptions { return this.options; } + + /** + * Checks equality by checking the options that are actually set by + * createSsl. + * @param other + * @returns + */ + _equals(other: ServerCredentials): boolean { + if (this === other) { + return true; + } + if (!(other instanceof SecureServerCredentials)) { + return false; + } + // options.ca equality check + if (Buffer.isBuffer(this.options.ca) && Buffer.isBuffer(other.options.ca)) { + if (!this.options.ca.equals(other.options.ca)) { + return false; + } + } else { + if (this.options.ca !== other.options.ca) { + return false; + } + } + // options.cert equality check + if (Array.isArray(this.options.cert) && Array.isArray(other.options.cert)) { + if (this.options.cert.length !== other.options.cert.length) { + return false; + } + for (let i = 0; i < this.options.cert.length; i++) { + const thisCert = this.options.cert[i]; + const otherCert = other.options.cert[i]; + if (Buffer.isBuffer(thisCert) && Buffer.isBuffer(otherCert)) { + if (!thisCert.equals(otherCert)) { + return false; + } + } else { + if (thisCert !== otherCert) { + return false; + } + } + } + } else { + if (this.options.cert !== other.options.cert) { + return false; + } + } + // options.key equality check + if (Array.isArray(this.options.key) && Array.isArray(other.options.key)) { + if (this.options.key.length !== other.options.key.length) { + return false; + } + for (let i = 0; i < this.options.key.length; i++) { + const thisKey = this.options.key[i]; + const otherKey = other.options.key[i]; + if (Buffer.isBuffer(thisKey) && Buffer.isBuffer(otherKey)) { + if (!thisKey.equals(otherKey)) { + return false; + } + } else { + if (thisKey !== otherKey) { + return false; + } + } + } + } else { + if (this.options.key !== other.options.key) { + return false; + } + } + // options.requestCert equality check + if (this.options.requestCert !== other.options.requestCert) { + return false; + } + /* ciphers is derived from a value that is constant for the process, so no + * equality check is needed. */ + return true; + } } diff --git a/packages/grpc-js/src/server-interceptors.ts b/packages/grpc-js/src/server-interceptors.ts new file mode 100644 index 000000000..c2d985a6a --- /dev/null +++ b/packages/grpc-js/src/server-interceptors.ts @@ -0,0 +1,987 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
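// Illustration (not part of the patch): typical construction of the credentials that the
// new _equals() implementations compare. File paths are placeholders; KeyCertPair uses
// the library's private_key / cert_chain field names.
import * as fs from 'fs';
import { Server, ServerCredentials } from '@grpc/grpc-js';

const sketchRootCert = fs.readFileSync('ca.pem'); // placeholder path
const sketchKeyCertPairs = [
  {
    private_key: fs.readFileSync('server-key.pem'), // placeholder path
    cert_chain: fs.readFileSync('server-cert.pem'), // placeholder path
  },
];

// The third argument requests and verifies client certificates (mutual TLS).
const sketchCredentials = ServerCredentials.createSsl(sketchRootCert, sketchKeyCertPairs, true);

const sketchServer = new Server();
sketchServer.bindAsync('localhost:50051', sketchCredentials, (error, port) => {
  // Depending on the library version, server.start() may also be needed after binding.
  if (error) {
    console.error(error);
  } else {
    console.log(`listening on ${port}`);
  }
});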
+ * + */ + +import { PartialStatusObject } from './call-interface'; +import { ServerMethodDefinition } from './make-client'; +import { Metadata } from './metadata'; +import { ChannelOptions } from './channel-options'; +import { Handler, ServerErrorResponse } from './server-call'; +import { Deadline } from './deadline'; +import { + DEFAULT_MAX_RECEIVE_MESSAGE_LENGTH, + DEFAULT_MAX_SEND_MESSAGE_LENGTH, + LogVerbosity, + Status, +} from './constants'; +import * as http2 from 'http2'; +import { getErrorMessage } from './error'; +import * as zlib from 'zlib'; +import { StreamDecoder } from './stream-decoder'; +import { CallEventTracker } from './transport'; +import * as logging from './logging'; + +const TRACER_NAME = 'server_call'; + +function trace(text: string) { + logging.trace(LogVerbosity.DEBUG, TRACER_NAME, text); +} + +export interface ServerMetadataListener { + (metadata: Metadata, next: (metadata: Metadata) => void): void; +} + +export interface ServerMessageListener { + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (message: any, next: (message: any) => void): void; +} + +export interface ServerHalfCloseListener { + (next: () => void): void; +} + +export interface ServerCancelListener { + (): void; +} + +export interface FullServerListener { + onReceiveMetadata: ServerMetadataListener; + onReceiveMessage: ServerMessageListener; + onReceiveHalfClose: ServerHalfCloseListener; + onCancel: ServerCancelListener; +} + +export type ServerListener = Partial; + +export class ServerListenerBuilder { + private metadata: ServerMetadataListener | undefined = undefined; + private message: ServerMessageListener | undefined = undefined; + private halfClose: ServerHalfCloseListener | undefined = undefined; + private cancel: ServerCancelListener | undefined = undefined; + + withOnReceiveMetadata(onReceiveMetadata: ServerMetadataListener): this { + this.metadata = onReceiveMetadata; + return this; + } + + withOnReceiveMessage(onReceiveMessage: ServerMessageListener): this { + this.message = onReceiveMessage; + return this; + } + + withOnReceiveHalfClose(onReceiveHalfClose: ServerHalfCloseListener): this { + this.halfClose = onReceiveHalfClose; + return this; + } + + withOnCancel(onCancel: ServerCancelListener): this { + this.cancel = onCancel; + return this; + } + + build(): ServerListener { + return { + onReceiveMetadata: this.metadata, + onReceiveMessage: this.message, + onReceiveHalfClose: this.halfClose, + onCancel: this.cancel, + }; + } +} + +export interface InterceptingServerListener { + onReceiveMetadata(metadata: Metadata): void; + // eslint-disable-next-line @typescript-eslint/no-explicit-any + onReceiveMessage(message: any): void; + onReceiveHalfClose(): void; + onCancel(): void; +} + +export function isInterceptingServerListener( + listener: ServerListener | InterceptingServerListener +): listener is InterceptingServerListener { + return ( + listener.onReceiveMetadata !== undefined && + listener.onReceiveMetadata.length === 1 + ); +} + +class InterceptingServerListenerImpl implements InterceptingServerListener { + /** + * Once the call is cancelled, ignore all other events. 
+ */ + private cancelled = false; + private processingMetadata = false; + private hasPendingMessage = false; + private pendingMessage: any = null; + private processingMessage = false; + private hasPendingHalfClose = false; + + constructor( + private listener: FullServerListener, + private nextListener: InterceptingServerListener + ) {} + + private processPendingMessage() { + if (this.hasPendingMessage) { + this.nextListener.onReceiveMessage(this.pendingMessage); + this.pendingMessage = null; + this.hasPendingMessage = false; + } + } + + private processPendingHalfClose() { + if (this.hasPendingHalfClose) { + this.nextListener.onReceiveHalfClose(); + this.hasPendingHalfClose = false; + } + } + + onReceiveMetadata(metadata: Metadata): void { + if (this.cancelled) { + return; + } + this.processingMetadata = true; + this.listener.onReceiveMetadata(metadata, interceptedMetadata => { + this.processingMetadata = false; + if (this.cancelled) { + return; + } + this.nextListener.onReceiveMetadata(interceptedMetadata); + this.processPendingMessage(); + this.processPendingHalfClose(); + }); + } + onReceiveMessage(message: any): void { + if (this.cancelled) { + return; + } + this.processingMessage = true; + this.listener.onReceiveMessage(message, msg => { + this.processingMessage = false; + if (this.cancelled) { + return; + } + if (this.processingMetadata) { + this.pendingMessage = msg; + this.hasPendingMessage = true; + } else { + this.nextListener.onReceiveMessage(msg); + this.processPendingHalfClose(); + } + }); + } + onReceiveHalfClose(): void { + if (this.cancelled) { + return; + } + this.listener.onReceiveHalfClose(() => { + if (this.cancelled) { + return; + } + if (this.processingMetadata || this.processingMessage) { + this.hasPendingHalfClose = true; + } else { + this.nextListener.onReceiveHalfClose(); + } + }); + } + onCancel(): void { + this.cancelled = true; + this.listener.onCancel(); + this.nextListener.onCancel(); + } +} + +export interface StartResponder { + (next: (listener?: ServerListener) => void): void; +} + +export interface MetadataResponder { + (metadata: Metadata, next: (metadata: Metadata) => void): void; +} + +export interface MessageResponder { + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (message: any, next: (message: any) => void): void; +} + +export interface StatusResponder { + ( + status: PartialStatusObject, + next: (status: PartialStatusObject) => void + ): void; +} + +export interface FullResponder { + start: StartResponder; + sendMetadata: MetadataResponder; + sendMessage: MessageResponder; + sendStatus: StatusResponder; +} + +export type Responder = Partial; + +export class ResponderBuilder { + private start: StartResponder | undefined = undefined; + private metadata: MetadataResponder | undefined = undefined; + private message: MessageResponder | undefined = undefined; + private status: StatusResponder | undefined = undefined; + + withStart(start: StartResponder): this { + this.start = start; + return this; + } + + withSendMetadata(sendMetadata: MetadataResponder): this { + this.metadata = sendMetadata; + return this; + } + + withSendMessage(sendMessage: MessageResponder): this { + this.message = sendMessage; + return this; + } + + withSendStatus(sendStatus: StatusResponder): this { + this.status = sendStatus; + return this; + } + + build(): Responder { + return { + start: this.start, + sendMetadata: this.metadata, + sendMessage: this.message, + sendStatus: this.status, + }; + } +} + +const defaultServerListener: FullServerListener = { + 
onReceiveMetadata: (metadata, next) => { + next(metadata); + }, + onReceiveMessage: (message, next) => { + next(message); + }, + onReceiveHalfClose: next => { + next(); + }, + onCancel: () => {}, +}; + +const defaultResponder: FullResponder = { + start: next => { + next(); + }, + sendMetadata: (metadata, next) => { + next(metadata); + }, + sendMessage: (message, next) => { + next(message); + }, + sendStatus: (status, next) => { + next(status); + }, +}; + +export interface ServerInterceptingCallInterface { + /** + * Register the listener to handle inbound events. + */ + start(listener: InterceptingServerListener): void; + /** + * Send response metadata. + */ + sendMetadata(metadata: Metadata): void; + /** + * Send a response message. + */ + sendMessage(message: any, callback: () => void): void; + /** + * End the call by sending this status. + */ + sendStatus(status: PartialStatusObject): void; + /** + * Start a single read, eventually triggering either listener.onReceiveMessage or listener.onReceiveHalfClose. + */ + startRead(): void; + /** + * Return the peer address of the client making the request, if known, or "unknown" otherwise + */ + getPeer(): string; + /** + * Return the call deadline set by the client. The value is Infinity if there is no deadline. + */ + getDeadline(): Deadline; +} + +export class ServerInterceptingCall implements ServerInterceptingCallInterface { + private responder: FullResponder; + private processingMetadata = false; + private processingMessage = false; + private pendingMessage: any = null; + private pendingMessageCallback: (() => void) | null = null; + private pendingStatus: PartialStatusObject | null = null; + constructor( + private nextCall: ServerInterceptingCallInterface, + responder?: Responder + ) { + this.responder = { + start: responder?.start ?? defaultResponder.start, + sendMetadata: responder?.sendMetadata ?? defaultResponder.sendMetadata, + sendMessage: responder?.sendMessage ?? defaultResponder.sendMessage, + sendStatus: responder?.sendStatus ?? defaultResponder.sendStatus, + }; + } + + private processPendingMessage() { + if (this.pendingMessageCallback) { + this.nextCall.sendMessage( + this.pendingMessage, + this.pendingMessageCallback + ); + this.pendingMessage = null; + this.pendingMessageCallback = null; + } + } + + private processPendingStatus() { + if (this.pendingStatus) { + this.nextCall.sendStatus(this.pendingStatus); + this.pendingStatus = null; + } + } + + start(listener: InterceptingServerListener): void { + this.responder.start(interceptedListener => { + const fullInterceptedListener: FullServerListener = { + onReceiveMetadata: + interceptedListener?.onReceiveMetadata ?? + defaultServerListener.onReceiveMetadata, + onReceiveMessage: + interceptedListener?.onReceiveMessage ?? + defaultServerListener.onReceiveMessage, + onReceiveHalfClose: + interceptedListener?.onReceiveHalfClose ?? + defaultServerListener.onReceiveHalfClose, + onCancel: + interceptedListener?.onCancel ?? 
defaultServerListener.onCancel, + }; + const finalInterceptingListener = new InterceptingServerListenerImpl( + fullInterceptedListener, + listener + ); + this.nextCall.start(finalInterceptingListener); + }); + } + sendMetadata(metadata: Metadata): void { + this.processingMetadata = true; + this.responder.sendMetadata(metadata, interceptedMetadata => { + this.processingMetadata = false; + this.nextCall.sendMetadata(interceptedMetadata); + this.processPendingMessage(); + this.processPendingStatus(); + }); + } + sendMessage(message: any, callback: () => void): void { + this.processingMessage = true; + this.responder.sendMessage(message, interceptedMessage => { + this.processingMessage = false; + if (this.processingMetadata) { + this.pendingMessage = interceptedMessage; + this.pendingMessageCallback = callback; + } else { + this.nextCall.sendMessage(interceptedMessage, callback); + } + }); + } + sendStatus(status: PartialStatusObject): void { + this.responder.sendStatus(status, interceptedStatus => { + if (this.processingMetadata || this.processingMessage) { + this.pendingStatus = interceptedStatus; + } else { + this.nextCall.sendStatus(interceptedStatus); + } + }); + } + startRead(): void { + this.nextCall.startRead(); + } + getPeer(): string { + return this.nextCall.getPeer(); + } + getDeadline(): Deadline { + return this.nextCall.getDeadline(); + } +} + +export interface ServerInterceptor { + ( + methodDescriptor: ServerMethodDefinition, + call: ServerInterceptingCallInterface + ): ServerInterceptingCall; +} + +interface DeadlineUnitIndexSignature { + [name: string]: number; +} + +const GRPC_ACCEPT_ENCODING_HEADER = 'grpc-accept-encoding'; +const GRPC_ENCODING_HEADER = 'grpc-encoding'; +const GRPC_MESSAGE_HEADER = 'grpc-message'; +const GRPC_STATUS_HEADER = 'grpc-status'; +const GRPC_TIMEOUT_HEADER = 'grpc-timeout'; +const DEADLINE_REGEX = /(\d{1,8})\s*([HMSmun])/; +const deadlineUnitsToMs: DeadlineUnitIndexSignature = { + H: 3600000, + M: 60000, + S: 1000, + m: 1, + u: 0.001, + n: 0.000001, +}; + +const defaultCompressionHeaders = { + // TODO(cjihrig): Remove these encoding headers from the default response + // once compression is integrated. 
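+ // These headers advertise the encodings this server can decompress on
+ // receive; responses themselves are currently always sent with 'identity'
+ // encoding (see the note in serializeMessage below).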
+ [GRPC_ACCEPT_ENCODING_HEADER]: 'identity,deflate,gzip', + [GRPC_ENCODING_HEADER]: 'identity', +}; +const defaultResponseHeaders = { + [http2.constants.HTTP2_HEADER_STATUS]: http2.constants.HTTP_STATUS_OK, + [http2.constants.HTTP2_HEADER_CONTENT_TYPE]: 'application/grpc+proto', +}; +const defaultResponseOptions = { + waitForTrailers: true, +} as http2.ServerStreamResponseOptions; + +type ReadQueueEntryType = 'COMPRESSED' | 'READABLE' | 'HALF_CLOSE'; + +interface ReadQueueEntry { + type: ReadQueueEntryType; + compressedMessage: Buffer | null; + parsedMessage: any; +} + +export class BaseServerInterceptingCall + implements ServerInterceptingCallInterface +{ + private listener: InterceptingServerListener | null = null; + private metadata: Metadata; + private deadlineTimer: NodeJS.Timeout | null = null; + private deadline: Deadline = Infinity; + private maxSendMessageSize: number = DEFAULT_MAX_SEND_MESSAGE_LENGTH; + private maxReceiveMessageSize: number = DEFAULT_MAX_RECEIVE_MESSAGE_LENGTH; + private cancelled = false; + private metadataSent = false; + private wantTrailers = false; + private cancelNotified = false; + private incomingEncoding = 'identity'; + private decoder: StreamDecoder; + private readQueue: ReadQueueEntry[] = []; + private isReadPending = false; + private receivedHalfClose = false; + private streamEnded = false; + + constructor( + private readonly stream: http2.ServerHttp2Stream, + headers: http2.IncomingHttpHeaders, + private readonly callEventTracker: CallEventTracker | null, + private readonly handler: Handler, + options: ChannelOptions + ) { + this.stream.once('error', (err: ServerErrorResponse) => { + /* We need an error handler to avoid uncaught error event exceptions, but + * there is nothing we can reasonably do here. Any error event should + * have a corresponding close event, which handles emitting the cancelled + * event. And the stream is now in a bad state, so we can't reasonably + * expect to be able to send an error over it. 
*/ + }); + + this.stream.once('close', () => { + trace( + 'Request to method ' + + this.handler?.path + + ' stream closed with rstCode ' + + this.stream.rstCode + ); + + if (this.callEventTracker && !this.streamEnded) { + this.streamEnded = true; + this.callEventTracker.onStreamEnd(false); + this.callEventTracker.onCallEnd({ + code: Status.CANCELLED, + details: 'Stream closed before sending status', + metadata: null, + }); + } + + this.notifyOnCancel(); + }); + + this.stream.on('data', (data: Buffer) => { + this.handleDataFrame(data); + }); + this.stream.pause(); + + this.stream.on('end', () => { + this.handleEndEvent(); + }); + + if ('grpc.max_send_message_length' in options) { + this.maxSendMessageSize = options['grpc.max_send_message_length']!; + } + if ('grpc.max_receive_message_length' in options) { + this.maxReceiveMessageSize = options['grpc.max_receive_message_length']!; + } + + this.decoder = new StreamDecoder(this.maxReceiveMessageSize); + + const metadata = Metadata.fromHttp2Headers(headers); + + if (logging.isTracerEnabled(TRACER_NAME)) { + trace( + 'Request to ' + + this.handler.path + + ' received headers ' + + JSON.stringify(metadata.toJSON()) + ); + } + + const timeoutHeader = metadata.get(GRPC_TIMEOUT_HEADER); + + if (timeoutHeader.length > 0) { + this.handleTimeoutHeader(timeoutHeader[0] as string); + } + + const encodingHeader = metadata.get(GRPC_ENCODING_HEADER); + + if (encodingHeader.length > 0) { + this.incomingEncoding = encodingHeader[0] as string; + } + + // Remove several headers that should not be propagated to the application + metadata.remove(GRPC_TIMEOUT_HEADER); + metadata.remove(GRPC_ENCODING_HEADER); + metadata.remove(GRPC_ACCEPT_ENCODING_HEADER); + metadata.remove(http2.constants.HTTP2_HEADER_ACCEPT_ENCODING); + metadata.remove(http2.constants.HTTP2_HEADER_TE); + metadata.remove(http2.constants.HTTP2_HEADER_CONTENT_TYPE); + this.metadata = metadata; + } + + private handleTimeoutHeader(timeoutHeader: string) { + const match = timeoutHeader.toString().match(DEADLINE_REGEX); + + if (match === null) { + const status: PartialStatusObject = { + code: Status.INTERNAL, + details: `Invalid ${GRPC_TIMEOUT_HEADER} value "${timeoutHeader}"`, + metadata: null, + }; + // Wait for the constructor to complete before sending the error. + process.nextTick(() => { + this.sendStatus(status); + }); + return; + } + + const timeout = (+match[1] * deadlineUnitsToMs[match[2]]) | 0; + + const now = new Date(); + this.deadline = now.setMilliseconds(now.getMilliseconds() + timeout); + this.deadlineTimer = setTimeout(() => { + const status: PartialStatusObject = { + code: Status.DEADLINE_EXCEEDED, + details: 'Deadline exceeded', + metadata: null, + }; + this.sendStatus(status); + }, timeout); + } + + private checkCancelled(): boolean { + /* In some cases the stream can become destroyed before the close event + * fires. That creates a race condition that this check works around */ + if (!this.cancelled && (this.stream.destroyed || this.stream.closed)) { + this.notifyOnCancel(); + this.cancelled = true; + } + return this.cancelled; + } + private notifyOnCancel() { + if (this.cancelNotified) { + return; + } + this.cancelNotified = true; + this.cancelled = true; + process.nextTick(() => { + this.listener?.onCancel(); + }); + if (this.deadlineTimer) { + clearTimeout(this.deadlineTimer); + } + // Flush incoming data frames + this.stream.resume(); + } + + /** + * A server handler can start sending messages without explicitly sending + * metadata. 
In that case, we need to send headers before sending any + * messages. This function does that if necessary. + */ + private maybeSendMetadata() { + if (!this.metadataSent) { + this.sendMetadata(new Metadata()); + } + } + + /** + * Serialize a message to a length-delimited byte string. + * @param value + * @returns + */ + private serializeMessage(value: any) { + const messageBuffer = this.handler.serialize(value); + const byteLength = messageBuffer.byteLength; + const output = Buffer.allocUnsafe(byteLength + 5); + /* Note: response compression is currently not supported, so this + * compressed bit is always 0. */ + output.writeUInt8(0, 0); + output.writeUInt32BE(byteLength, 1); + messageBuffer.copy(output, 5); + return output; + } + + private decompressMessage( + message: Buffer, + encoding: string + ): Buffer | Promise<Buffer> { + const messageContents = message.subarray(5); + if (encoding === 'identity') { + return messageContents; + } else if (encoding === 'deflate' || encoding === 'gzip') { + let decompresser: zlib.Gunzip | zlib.Deflate; + if (encoding === 'deflate') { + decompresser = zlib.createInflate(); + } else { + decompresser = zlib.createGunzip(); + } + return new Promise<Buffer>((resolve, reject) => { + let totalLength = 0; + const messageParts: Buffer[] = []; + decompresser.on('data', (chunk: Buffer) => { + messageParts.push(chunk); + totalLength += chunk.byteLength; + if (this.maxReceiveMessageSize !== -1 && totalLength > this.maxReceiveMessageSize) { + decompresser.destroy(); + reject({ + code: Status.RESOURCE_EXHAUSTED, + details: `Received message that decompresses to a size larger than ${this.maxReceiveMessageSize}` + }); + } + }); + decompresser.on('end', () => { + resolve(Buffer.concat(messageParts)); + }); + decompresser.write(messageContents); + decompresser.end(); + }); + } else { + return Promise.reject({ + code: Status.UNIMPLEMENTED, + details: `Received message compressed with unsupported encoding "${encoding}"`, + }); + } + } + + private async decompressAndMaybePush(queueEntry: ReadQueueEntry) { + if (queueEntry.type !== 'COMPRESSED') { + throw new Error(`Invalid queue entry type: ${queueEntry.type}`); + } + + const compressed = queueEntry.compressedMessage!.readUInt8(0) === 1; + const compressedMessageEncoding = compressed + ?
this.incomingEncoding + : 'identity'; + let decompressedMessage: Buffer; + try { + decompressedMessage = await this.decompressMessage( + queueEntry.compressedMessage!, + compressedMessageEncoding + ); + } catch (err) { + this.sendStatus(err as PartialStatusObject); + return; + } + try { + queueEntry.parsedMessage = this.handler.deserialize(decompressedMessage); + } catch (err) { + this.sendStatus({ + code: Status.INTERNAL, + details: `Error deserializing request: ${(err as Error).message}`, + }); + return; + } + queueEntry.type = 'READABLE'; + this.maybePushNextMessage(); + } + + private maybePushNextMessage() { + if ( + this.listener && + this.isReadPending && + this.readQueue.length > 0 && + this.readQueue[0].type !== 'COMPRESSED' + ) { + this.isReadPending = false; + const nextQueueEntry = this.readQueue.shift()!; + if (nextQueueEntry.type === 'READABLE') { + this.listener.onReceiveMessage(nextQueueEntry.parsedMessage); + } else { + // nextQueueEntry.type === 'HALF_CLOSE' + this.listener.onReceiveHalfClose(); + } + } + } + + private handleDataFrame(data: Buffer) { + if (this.checkCancelled()) { + return; + } + trace( + 'Request to ' + + this.handler.path + + ' received data frame of size ' + + data.length + ); + let rawMessages: Buffer[]; + try { + rawMessages = this.decoder.write(data); + } catch (e) { + this.sendStatus({ code: Status.RESOURCE_EXHAUSTED, details: (e as Error).message }); + return; + } + + for (const messageBytes of rawMessages) { + this.stream.pause(); + const queueEntry: ReadQueueEntry = { + type: 'COMPRESSED', + compressedMessage: messageBytes, + parsedMessage: null, + }; + this.readQueue.push(queueEntry); + this.decompressAndMaybePush(queueEntry); + this.callEventTracker?.addMessageReceived(); + } + } + private handleEndEvent() { + this.readQueue.push({ + type: 'HALF_CLOSE', + compressedMessage: null, + parsedMessage: null, + }); + this.receivedHalfClose = true; + this.maybePushNextMessage(); + } + start(listener: InterceptingServerListener): void { + trace('Request to ' + this.handler.path + ' start called'); + if (this.checkCancelled()) { + return; + } + this.listener = listener; + listener.onReceiveMetadata(this.metadata); + } + sendMetadata(metadata: Metadata): void { + if (this.checkCancelled()) { + return; + } + + if (this.metadataSent) { + return; + } + + this.metadataSent = true; + const custom = metadata ? metadata.toHttp2Headers() : null; + const headers = { + ...defaultResponseHeaders, + ...defaultCompressionHeaders, + ...custom, + }; + this.stream.respond(headers, defaultResponseOptions); + } + sendMessage(message: any, callback: () => void): void { + if (this.checkCancelled()) { + return; + } + let response: Buffer; + try { + response = this.serializeMessage(message); + } catch (e) { + this.sendStatus({ + code: Status.INTERNAL, + details: `Error serializing response: ${getErrorMessage(e)}`, + metadata: null, + }); + return; + } + + if ( + this.maxSendMessageSize !== -1 && + response.length - 5 > this.maxSendMessageSize + ) { + this.sendStatus({ + code: Status.RESOURCE_EXHAUSTED, + details: `Sent message larger than max (${response.length} vs. 
${this.maxSendMessageSize})`, + metadata: null, + }); + return; + } + this.maybeSendMetadata(); + trace( + 'Request to ' + + this.handler.path + + ' sent data frame of size ' + + response.length + ); + this.stream.write(response, error => { + if (error) { + this.sendStatus({ + code: Status.INTERNAL, + details: `Error writing message: ${getErrorMessage(error)}`, + metadata: null, + }); + return; + } + this.callEventTracker?.addMessageSent(); + callback(); + }); + } + sendStatus(status: PartialStatusObject): void { + if (this.checkCancelled()) { + return; + } + + trace( + 'Request to method ' + + this.handler?.path + + ' ended with status code: ' + + Status[status.code] + + ' details: ' + + status.details + ); + + if (this.metadataSent) { + if (!this.wantTrailers) { + this.wantTrailers = true; + this.stream.once('wantTrailers', () => { + if (this.callEventTracker && !this.streamEnded) { + this.streamEnded = true; + this.callEventTracker.onStreamEnd(true); + this.callEventTracker.onCallEnd(status); + } + const trailersToSend = { + [GRPC_STATUS_HEADER]: status.code, + [GRPC_MESSAGE_HEADER]: encodeURI(status.details), + ...status.metadata?.toHttp2Headers(), + }; + + this.stream.sendTrailers(trailersToSend); + this.notifyOnCancel(); + }); + this.stream.end(); + } else { + this.notifyOnCancel(); + } + } else { + if (this.callEventTracker && !this.streamEnded) { + this.streamEnded = true; + this.callEventTracker.onStreamEnd(true); + this.callEventTracker.onCallEnd(status); + } + // Trailers-only response + const trailersToSend = { + [GRPC_STATUS_HEADER]: status.code, + [GRPC_MESSAGE_HEADER]: encodeURI(status.details), + ...defaultResponseHeaders, + ...status.metadata?.toHttp2Headers(), + }; + this.stream.respond(trailersToSend, { endStream: true }); + this.notifyOnCancel(); + } + } + startRead(): void { + trace('Request to ' + this.handler.path + ' startRead called'); + if (this.checkCancelled()) { + return; + } + this.isReadPending = true; + if (this.readQueue.length === 0) { + if (!this.receivedHalfClose) { + this.stream.resume(); + } + } else { + this.maybePushNextMessage(); + } + } + getPeer(): string { + const socket = this.stream.session?.socket; + if (socket?.remoteAddress) { + if (socket.remotePort) { + return `${socket.remoteAddress}:${socket.remotePort}`; + } else { + return socket.remoteAddress; + } + } else { + return 'unknown'; + } + } + getDeadline(): Deadline { + return this.deadline; + } +} + +export function getServerInterceptingCall( + interceptors: ServerInterceptor[], + stream: http2.ServerHttp2Stream, + headers: http2.IncomingHttpHeaders, + callEventTracker: CallEventTracker | null, + handler: Handler, + options: ChannelOptions +) { + const methodDefinition: ServerMethodDefinition = { + path: handler.path, + requestStream: handler.type === 'clientStream' || handler.type === 'bidi', + responseStream: handler.type === 'serverStream' || handler.type === 'bidi', + requestDeserialize: handler.deserialize, + responseSerialize: handler.serialize, + }; + const baseCall = new BaseServerInterceptingCall( + stream, + headers, + callEventTracker, + handler, + options + ); + return interceptors.reduce( + (call: ServerInterceptingCallInterface, interceptor: ServerInterceptor) => { + return interceptor(methodDefinition, call); + }, + baseCall + ); +} diff --git a/packages/grpc-js/src/server.ts b/packages/grpc-js/src/server.ts index 255e210b6..feb511b41 100644 --- a/packages/grpc-js/src/server.ts +++ b/packages/grpc-js/src/server.ts @@ -16,7 +16,7 @@ */ import * as http2 from 'http2'; -import 
{ AddressInfo } from 'net'; +import * as util from 'util'; import { ServiceError } from './call'; import { Status, LogVerbosity } from './constants'; @@ -28,20 +28,18 @@ import { HandleCall, Handler, HandlerType, - Http2ServerCallStream, sendUnaryData, ServerDuplexStream, ServerDuplexStreamImpl, ServerReadableStream, - ServerReadableStreamImpl, ServerStreamingHandler, ServerUnaryCall, - ServerUnaryCallImpl, ServerWritableStream, ServerWritableStreamImpl, UnaryHandler, ServerErrorResponse, ServerStatusResponse, + serverErrorToStatus, } from './server-call'; import { ServerCredentials } from './server-credentials'; import { ChannelOptions } from './channel-options'; @@ -53,32 +51,90 @@ import { import * as logging from './logging'; import { SubchannelAddress, - TcpSubchannelAddress, isTcpSubchannelAddress, subchannelAddressToString, -} from './subchannel'; -import { parseUri } from './uri-parser'; + stringToSubchannelAddress, +} from './subchannel-address'; +import { + GrpcUri, + combineHostPort, + parseUri, + splitHostPort, + uriToString, +} from './uri-parser'; +import { + ChannelzCallTracker, + ChannelzCallTrackerStub, + ChannelzChildrenTracker, + ChannelzChildrenTrackerStub, + ChannelzTrace, + ChannelzTraceStub, + registerChannelzServer, + registerChannelzSocket, + ServerInfo, + ServerRef, + SocketInfo, + SocketRef, + TlsInfo, + unregisterChannelzRef, +} from './channelz'; +import { CipherNameAndProtocol, TLSSocket } from 'tls'; +import { + ServerInterceptingCallInterface, + ServerInterceptor, + getServerInterceptingCall, +} from './server-interceptors'; +import { PartialStatusObject } from './call-interface'; +import { CallEventTracker } from './transport'; + +const UNLIMITED_CONNECTION_AGE_MS = ~(1 << 31); +const KEEPALIVE_MAX_TIME_MS = ~(1 << 31); +const KEEPALIVE_TIMEOUT_MS = 20000; +const MAX_CONNECTION_IDLE_MS = ~(1 << 31); + +const { HTTP2_HEADER_PATH } = http2.constants; const TRACER_NAME = 'server'; +const kMaxAge = Buffer.from('max_age'); -function trace(text: string): void { - logging.trace(LogVerbosity.DEBUG, TRACER_NAME, text); -} +type AnyHttp2Server = http2.Http2Server | http2.Http2SecureServer; interface BindResult { port: number; count: number; + errors: string[]; +} + +interface SingleAddressBindResult { + port: number; + error?: string; } function noop(): void {} +/** + * Decorator to wrap a class method with util.deprecate + * @param message The message to output if the deprecated method is called + * @returns + */ +function deprecate(message: string) { + return function ( + target: (this: This, ...args: Args) => Return, + context: ClassMethodDecoratorContext< + This, + (this: This, ...args: Args) => Return + > + ) { + return util.deprecate(target, message); + }; +} + function getUnimplementedStatusResponse( methodName: string -): Partial { +): PartialStatusObject { return { code: Status.UNIMPLEMENTED, details: `The server does not implement the method ${methodName}`, - metadata: new Metadata(), }; } @@ -94,9 +150,8 @@ export interface UntypedServiceImplementation { } function getDefaultHandler(handlerType: HandlerType, methodName: string) { - const unimplementedStatusResponse = getUnimplementedStatusResponse( - methodName - ); + const unimplementedStatusResponse = + getUnimplementedStatusResponse(methodName); switch (handlerType) { case 'unary': return ( @@ -125,22 +180,262 @@ function getDefaultHandler(handlerType: HandlerType, methodName: string) { } } +interface ChannelzSessionInfo { + ref: SocketRef; + streamTracker: ChannelzCallTracker | ChannelzCallTrackerStub; 
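+ /** Running totals and timestamps surfaced through the channelz socket query for this session (see getChannelzSessionInfo below). */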
+ messagesSent: number; + messagesReceived: number; + keepAlivesSent: number; + lastMessageSentTimestamp: Date | null; + lastMessageReceivedTimestamp: Date | null; +} + +/** + * Information related to a single invocation of bindAsync. This should be + * tracked in a map keyed by target string, normalized with a pass through + * parseUri -> mapUriDefaultScheme -> uriToString. If the target has a port + * number and the port number is 0, the target string is modified with the + * concrete bound port. + */ +interface BoundPort { + /** + * The key used to refer to this object in the boundPorts map. + */ + mapKey: string; + /** + * The target string, passed through parseUri -> mapUriDefaultScheme. Used + * to determine the final key when the port number is 0. + */ + originalUri: GrpcUri; + /** + * If there is a pending bindAsync operation, this is a promise that resolves + * with the port number when that operation succeeds. If there is no such + * operation pending, this is null. + */ + completionPromise: Promise<number> | null; + /** + * The port number that was actually bound. Populated only after + * completionPromise resolves. + */ + portNumber: number; + /** + * Set by unbind if called while pending is true. + */ + cancelled: boolean; + /** + * The credentials object passed to the original bindAsync call. + */ + credentials: ServerCredentials; + /** + * The set of servers associated with this listening port. A target string + * that expands to multiple addresses will result in multiple listening + * servers. + */ + listeningServers: Set<AnyHttp2Server>; +} + +/** + * Should be in a map keyed by AnyHttp2Server. + */ +interface Http2ServerInfo { + channelzRef: SocketRef; + sessions: Set<http2.ServerHttp2Session>; +} + +interface SessionIdleTimeoutTracker { + activeStreams: number; + lastIdle: number; + timeout: NodeJS.Timeout; + onClose: (session: http2.ServerHttp2Session) => void | null; +} + +export interface ServerOptions extends ChannelOptions { + interceptors?: ServerInterceptor[]; +} + export class Server { - private http2ServerList: (http2.Http2Server | http2.Http2SecureServer)[] = []; + private boundPorts: Map<string, BoundPort> = new Map(); + private http2Servers: Map<AnyHttp2Server, Http2ServerInfo> = new Map(); + private sessionIdleTimeouts = new Map< + http2.ServerHttp2Session, + SessionIdleTimeoutTracker + >(); private handlers: Map<string, UntypedHandler> = new Map< string, UntypedHandler >(); - private sessions = new Set<http2.ServerHttp2Session>(); + private sessions = new Map<http2.ServerHttp2Session, ChannelzSessionInfo>(); + /** + * This field only exists to ensure that the start method throws an error if + * it is called twice, as it did previously. + */ private started = false; - private options: ChannelOptions; - - constructor(options?: ChannelOptions) { + private shutdown = false; + private options: ServerOptions; + private serverAddressString = 'null'; + + // Channelz Info + private readonly channelzEnabled: boolean = true; + private channelzRef: ServerRef; + private channelzTrace: ChannelzTrace | ChannelzTraceStub; + private callTracker: ChannelzCallTracker | ChannelzCallTrackerStub; + private listenerChildrenTracker: + | ChannelzChildrenTracker + | ChannelzChildrenTrackerStub; + private sessionChildrenTracker: + | ChannelzChildrenTracker + | ChannelzChildrenTrackerStub; + + private readonly maxConnectionAgeMs: number; + private readonly maxConnectionAgeGraceMs: number; + + private readonly keepaliveTimeMs: number; + private readonly keepaliveTimeoutMs: number; + + private readonly sessionIdleTimeout: number; + + private readonly interceptors: ServerInterceptor[]; + + /** + * Options that will be used to construct all Http2Server instances for this + * Server.
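+ * For example, 'grpc-node.max_session_memory' and
+ * 'grpc.max_concurrent_streams' from the constructor options are folded into
+ * maxSessionMemory and settings.maxConcurrentStreams here.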
+ */ + private commonServerOptions: http2.ServerOptions; + + constructor(options?: ServerOptions) { this.options = options ?? {}; + if (this.options['grpc.enable_channelz'] === 0) { + this.channelzEnabled = false; + this.channelzTrace = new ChannelzTraceStub(); + this.callTracker = new ChannelzCallTrackerStub(); + this.listenerChildrenTracker = new ChannelzChildrenTrackerStub(); + this.sessionChildrenTracker = new ChannelzChildrenTrackerStub(); + } else { + this.channelzTrace = new ChannelzTrace(); + this.callTracker = new ChannelzCallTracker(); + this.listenerChildrenTracker = new ChannelzChildrenTracker(); + this.sessionChildrenTracker = new ChannelzChildrenTracker(); + } + + this.channelzRef = registerChannelzServer( + 'server', + () => this.getChannelzInfo(), + this.channelzEnabled + ); + + this.channelzTrace.addTrace('CT_INFO', 'Server created'); + this.maxConnectionAgeMs = + this.options['grpc.max_connection_age_ms'] ?? UNLIMITED_CONNECTION_AGE_MS; + this.maxConnectionAgeGraceMs = + this.options['grpc.max_connection_age_grace_ms'] ?? + UNLIMITED_CONNECTION_AGE_MS; + this.keepaliveTimeMs = + this.options['grpc.keepalive_time_ms'] ?? KEEPALIVE_MAX_TIME_MS; + this.keepaliveTimeoutMs = + this.options['grpc.keepalive_timeout_ms'] ?? KEEPALIVE_TIMEOUT_MS; + this.sessionIdleTimeout = + this.options['grpc.max_connection_idle_ms'] ?? MAX_CONNECTION_IDLE_MS; + + this.commonServerOptions = { + maxSendHeaderBlockLength: Number.MAX_SAFE_INTEGER, + }; + if ('grpc-node.max_session_memory' in this.options) { + this.commonServerOptions.maxSessionMemory = + this.options['grpc-node.max_session_memory']; + } else { + /* By default, set a very large max session memory limit, to effectively + * disable enforcement of the limit. Some testing indicates that Node's + * behavior degrades badly when this limit is reached, so we solve that + * by disabling the check entirely. */ + this.commonServerOptions.maxSessionMemory = Number.MAX_SAFE_INTEGER; + } + if ('grpc.max_concurrent_streams' in this.options) { + this.commonServerOptions.settings = { + maxConcurrentStreams: this.options['grpc.max_concurrent_streams'], + }; + } + this.interceptors = this.options.interceptors ?? []; + this.trace('Server constructed'); + } + + private getChannelzInfo(): ServerInfo { + return { + trace: this.channelzTrace, + callTracker: this.callTracker, + listenerChildren: this.listenerChildrenTracker.getChildLists(), + sessionChildren: this.sessionChildrenTracker.getChildLists(), + }; + } + + private getChannelzSessionInfo( + session: http2.ServerHttp2Session + ): SocketInfo { + const sessionInfo = this.sessions.get(session)!; + const sessionSocket = session.socket; + const remoteAddress = sessionSocket.remoteAddress + ? stringToSubchannelAddress( + sessionSocket.remoteAddress, + sessionSocket.remotePort + ) + : null; + const localAddress = sessionSocket.localAddress + ? stringToSubchannelAddress( + sessionSocket.localAddress!, + sessionSocket.localPort + ) + : null; + let tlsInfo: TlsInfo | null; + if (session.encrypted) { + const tlsSocket: TLSSocket = sessionSocket as TLSSocket; + const cipherInfo: CipherNameAndProtocol & { standardName?: string } = + tlsSocket.getCipher(); + const certificate = tlsSocket.getCertificate(); + const peerCertificate = tlsSocket.getPeerCertificate(); + tlsInfo = { + cipherSuiteStandardName: cipherInfo.standardName ?? null, + cipherSuiteOtherName: cipherInfo.standardName ? null : cipherInfo.name, + localCertificate: + certificate && 'raw' in certificate ? 
certificate.raw : null, + remoteCertificate: + peerCertificate && 'raw' in peerCertificate + ? peerCertificate.raw + : null, + }; + } else { + tlsInfo = null; + } + const socketInfo: SocketInfo = { + remoteAddress: remoteAddress, + localAddress: localAddress, + security: tlsInfo, + remoteName: null, + streamsStarted: sessionInfo.streamTracker.callsStarted, + streamsSucceeded: sessionInfo.streamTracker.callsSucceeded, + streamsFailed: sessionInfo.streamTracker.callsFailed, + messagesSent: sessionInfo.messagesSent, + messagesReceived: sessionInfo.messagesReceived, + keepAlivesSent: sessionInfo.keepAlivesSent, + lastLocalStreamCreatedTimestamp: null, + lastRemoteStreamCreatedTimestamp: + sessionInfo.streamTracker.lastCallStartedTimestamp, + lastMessageSentTimestamp: sessionInfo.lastMessageSentTimestamp, + lastMessageReceivedTimestamp: sessionInfo.lastMessageReceivedTimestamp, + localFlowControlWindow: session.state.localWindowSize ?? null, + remoteFlowControlWindow: session.state.remoteWindowSize ?? null, + }; + return socketInfo; + } + + private trace(text: string): void { + logging.trace( + LogVerbosity.DEBUG, + TRACER_NAME, + '(' + this.channelzRef.id + ') ' + text + ); } - addProtoService(): void { + addProtoService(): never { throw new Error('Not implemented. Use addService() instead'); } @@ -163,7 +458,7 @@ export class Server { throw new Error('Cannot add an empty service to a server'); } - serviceKeys.forEach((name) => { + serviceKeys.forEach(name => { const attrs = service[name]; let methodType: HandlerType; @@ -209,45 +504,270 @@ export class Server { } removeService(service: ServiceDefinition): void { - if ( - service === null || - typeof service !== 'object' - ) { + if (service === null || typeof service !== 'object') { throw new Error('removeService() requires object as argument'); } const serviceKeys = Object.keys(service); - serviceKeys.forEach((name) => { + serviceKeys.forEach(name => { const attrs = service[name]; this.unregister(attrs.path); }); } - bind(port: string, creds: ServerCredentials): void { + bind(port: string, creds: ServerCredentials): never { throw new Error('Not implemented. Use bindAsync() instead'); } - bindAsync( - port: string, - creds: ServerCredentials, - callback: (error: Error | null, port: number) => void - ): void { - if (this.started === true) { - throw new Error('server is already started'); + private registerListenerToChannelz(boundAddress: SubchannelAddress) { + return registerChannelzSocket( + subchannelAddressToString(boundAddress), + () => { + return { + localAddress: boundAddress, + remoteAddress: null, + security: null, + remoteName: null, + streamsStarted: 0, + streamsSucceeded: 0, + streamsFailed: 0, + messagesSent: 0, + messagesReceived: 0, + keepAlivesSent: 0, + lastLocalStreamCreatedTimestamp: null, + lastRemoteStreamCreatedTimestamp: null, + lastMessageSentTimestamp: null, + lastMessageReceivedTimestamp: null, + localFlowControlWindow: null, + remoteFlowControlWindow: null, + }; + }, + this.channelzEnabled + ); + } + + private createHttp2Server(credentials: ServerCredentials) { + let http2Server: http2.Http2Server | http2.Http2SecureServer; + if (credentials._isSecure()) { + const secureServerOptions = Object.assign( + this.commonServerOptions, + credentials._getSettings()! 
+ ); + secureServerOptions.enableTrace = + this.options['grpc-node.tls_enable_trace'] === 1; + http2Server = http2.createSecureServer(secureServerOptions); + http2Server.on('secureConnection', (socket: TLSSocket) => { + /* These errors need to be handled by the user of Http2SecureServer, + * according to https://github.com/nodejs/node/issues/35824 */ + socket.on('error', (e: Error) => { + this.trace( + 'An incoming TLS connection closed with error: ' + e.message + ); + }); + }); + } else { + http2Server = http2.createServer(this.commonServerOptions); } - if (typeof port !== 'string') { - throw new TypeError('port must be a string'); + http2Server.setTimeout(0, noop); + this._setupHandlers(http2Server); + return http2Server; + } + + private bindOneAddress( + address: SubchannelAddress, + boundPortObject: BoundPort + ): Promise { + this.trace('Attempting to bind ' + subchannelAddressToString(address)); + const http2Server = this.createHttp2Server(boundPortObject.credentials); + return new Promise((resolve, reject) => { + const onError = (err: Error) => { + this.trace( + 'Failed to bind ' + + subchannelAddressToString(address) + + ' with error ' + + err.message + ); + resolve({ + port: 'port' in address ? address.port : 1, + error: err.message, + }); + }; + + http2Server.once('error', onError); + + http2Server.listen(address, () => { + const boundAddress = http2Server.address()!; + let boundSubchannelAddress: SubchannelAddress; + if (typeof boundAddress === 'string') { + boundSubchannelAddress = { + path: boundAddress, + }; + } else { + boundSubchannelAddress = { + host: boundAddress.address, + port: boundAddress.port, + }; + } + + const channelzRef = this.registerListenerToChannelz( + boundSubchannelAddress + ); + this.listenerChildrenTracker.refChild(channelzRef); + + this.http2Servers.set(http2Server, { + channelzRef: channelzRef, + sessions: new Set(), + }); + boundPortObject.listeningServers.add(http2Server); + this.trace( + 'Successfully bound ' + + subchannelAddressToString(boundSubchannelAddress) + ); + resolve({ + port: + 'port' in boundSubchannelAddress ? boundSubchannelAddress.port : 1, + }); + http2Server.removeListener('error', onError); + }); + }); + } + + private async bindManyPorts( + addressList: SubchannelAddress[], + boundPortObject: BoundPort + ): Promise { + if (addressList.length === 0) { + return { + count: 0, + port: 0, + errors: [], + }; + } + if (isTcpSubchannelAddress(addressList[0]) && addressList[0].port === 0) { + /* If binding to port 0, first try to bind the first address, then bind + * the rest of the address list to the specific port that it binds. */ + const firstAddressResult = await this.bindOneAddress( + addressList[0], + boundPortObject + ); + if (firstAddressResult.error) { + /* If the first address fails to bind, try the same operation starting + * from the second item in the list. */ + const restAddressResult = await this.bindManyPorts( + addressList.slice(1), + boundPortObject + ); + return { + ...restAddressResult, + errors: [firstAddressResult.error, ...restAddressResult.errors], + }; + } else { + const restAddresses = addressList + .slice(1) + .map(address => + isTcpSubchannelAddress(address) + ? 
{ host: address.host, port: firstAddressResult.port } + : address + ); + const restAddressResult = await Promise.all( + restAddresses.map(address => + this.bindOneAddress(address, boundPortObject) + ) + ); + const allResults = [firstAddressResult, ...restAddressResult]; + return { + count: allResults.filter(result => result.error === undefined).length, + port: firstAddressResult.port, + errors: allResults + .filter(result => result.error) + .map(result => result.error!), + }; + } + } else { + const allResults = await Promise.all( + addressList.map(address => + this.bindOneAddress(address, boundPortObject) + ) + ); + return { + count: allResults.filter(result => result.error === undefined).length, + port: allResults[0].port, + errors: allResults + .filter(result => result.error) + .map(result => result.error!), + }; } + } - if (creds === null || typeof creds !== 'object') { - throw new TypeError('creds must be an object'); + private async bindAddressList( + addressList: SubchannelAddress[], + boundPortObject: BoundPort + ): Promise { + const bindResult = await this.bindManyPorts(addressList, boundPortObject); + if (bindResult.count > 0) { + if (bindResult.count < addressList.length) { + logging.log( + LogVerbosity.INFO, + `WARNING Only ${bindResult.count} addresses added out of total ${addressList.length} resolved` + ); + } + return bindResult.port; + } else { + const errorString = `No address added out of total ${addressList.length} resolved`; + logging.log(LogVerbosity.ERROR, errorString); + throw new Error( + `${errorString} errors: [${bindResult.errors.join(',')}]` + ); } + } - if (typeof callback !== 'function') { - throw new TypeError('callback must be a function'); + private resolvePort(port: GrpcUri): Promise { + return new Promise((resolve, reject) => { + const resolverListener: ResolverListener = { + onSuccessfulResolution: ( + endpointList, + serviceConfig, + serviceConfigError + ) => { + // We only want one resolution result. 
Discard all future results + resolverListener.onSuccessfulResolution = () => {}; + const addressList = ([] as SubchannelAddress[]).concat( + ...endpointList.map(endpoint => endpoint.addresses) + ); + if (addressList.length === 0) { + reject(new Error(`No addresses resolved for port ${port}`)); + return; + } + resolve(addressList); + }, + onError: error => { + reject(new Error(error.details)); + }, + }; + const resolver = createResolver(port, resolverListener, this.options); + resolver.updateResolution(); + }); + } + + private async bindPort( + port: GrpcUri, + boundPortObject: BoundPort + ): Promise { + const addressList = await this.resolvePort(port); + if (boundPortObject.cancelled) { + this.completeUnbind(boundPortObject); + throw new Error('bindAsync operation cancelled by unbind call'); + } + const portNumber = await this.bindAddressList(addressList, boundPortObject); + if (boundPortObject.cancelled) { + this.completeUnbind(boundPortObject); + throw new Error('bindAsync operation cancelled by unbind call'); } + return portNumber; + } + private normalizePort(port: string): GrpcUri { const initialPortUri = parseUri(port); if (initialPortUri === null) { throw new Error(`Could not parse port "${port}"`); @@ -256,205 +776,257 @@ export class Server { if (portUri === null) { throw new Error(`Could not get a default scheme for port "${port}"`); } + return portUri; + } - const serverOptions: http2.ServerOptions = { - maxSendHeaderBlockLength: Number.MAX_SAFE_INTEGER - }; - if ('grpc-node.max_session_memory' in this.options) { - serverOptions.maxSessionMemory = this.options['grpc-node.max_session_memory']; + bindAsync( + port: string, + creds: ServerCredentials, + callback: (error: Error | null, port: number) => void + ): void { + if (this.shutdown) { + throw new Error('bindAsync called after shutdown'); } - if ('grpc.max_concurrent_streams' in this.options) { - serverOptions.settings = { - maxConcurrentStreams: this.options['grpc.max_concurrent_streams'], - }; + if (typeof port !== 'string') { + throw new TypeError('port must be a string'); } - const setupServer = (): http2.Http2Server | http2.Http2SecureServer => { - let http2Server: http2.Http2Server | http2.Http2SecureServer; - if (creds._isSecure()) { - const secureServerOptions = Object.assign( - serverOptions, - creds._getSettings()! 
- ); - http2Server = http2.createSecureServer(secureServerOptions); - } else { - http2Server = http2.createServer(serverOptions); - } + if (creds === null || !(creds instanceof ServerCredentials)) { + throw new TypeError('creds must be a ServerCredentials object'); + } - http2Server.setTimeout(0, noop); - this._setupHandlers(http2Server); - return http2Server; - }; + if (typeof callback !== 'function') { + throw new TypeError('callback must be a function'); + } - const bindSpecificPort = ( - addressList: SubchannelAddress[], - portNum: number, - previousCount: number - ): Promise => { - if (addressList.length === 0) { - return Promise.resolve({ port: portNum, count: previousCount }); - } - return Promise.all( - addressList.map((address) => { - trace('Attempting to bind ' + subchannelAddressToString(address)); - let addr: SubchannelAddress; - if (isTcpSubchannelAddress(address)) { - addr = { - host: (address as TcpSubchannelAddress).host, - port: portNum, - }; - } else { - addr = address; - } + this.trace('bindAsync port=' + port); - const http2Server = setupServer(); - return new Promise((resolve, reject) => { - function onError(err: Error): void { - resolve(err); - } - - http2Server.once('error', onError); - - http2Server.listen(addr, () => { - trace('Successfully bound ' + subchannelAddressToString(address)); - this.http2ServerList.push(http2Server); - const boundAddress = http2Server.address()!; - if (typeof boundAddress === 'string') { - resolve(portNum); - } else { - resolve(boundAddress.port); - } - http2Server.removeListener('error', onError); - }); - }); - }) - ).then((results) => { - let count = 0; - for (const result of results) { - if (typeof result === 'number') { - count += 1; - if (result !== portNum) { - throw new Error( - 'Invalid state: multiple port numbers added from single address' - ); - } - } - } - return { - port: portNum, - count: count + previousCount, - }; - }); + const portUri = this.normalizePort(port); + + const deferredCallback = (error: Error | null, port: number) => { + process.nextTick(() => callback(error, port)); }; - const bindWildcardPort = ( - addressList: SubchannelAddress[] - ): Promise => { - if (addressList.length === 0) { - return Promise.resolve({ port: 0, count: 0 }); - } - const address = addressList[0]; - const http2Server = setupServer(); - return new Promise((resolve, reject) => { - function onError(err: Error): void { - resolve(bindWildcardPort(addressList.slice(1))); + /* First, if this port is already bound or that bind operation is in + * progress, use that result. */ + let boundPortObject = this.boundPorts.get(uriToString(portUri)); + if (boundPortObject) { + if (!creds._equals(boundPortObject.credentials)) { + deferredCallback( + new Error(`${port} already bound with incompatible credentials`), + 0 + ); + return; + } + /* If that operation has previously been cancelled by an unbind call, + * uncancel it. 
*/ + boundPortObject.cancelled = false; + if (boundPortObject.completionPromise) { + boundPortObject.completionPromise.then( + portNum => callback(null, portNum), + error => callback(error as Error, 0) + ); + } else { + deferredCallback(null, boundPortObject.portNumber); + } + return; + } + boundPortObject = { + mapKey: uriToString(portUri), + originalUri: portUri, + completionPromise: null, + cancelled: false, + portNumber: 0, + credentials: creds, + listeningServers: new Set(), + }; + const splitPort = splitHostPort(portUri.path); + const completionPromise = this.bindPort(portUri, boundPortObject); + boundPortObject.completionPromise = completionPromise; + /* If the port number is 0, defer populating the map entry until after the + * bind operation completes and we have a specific port number. Otherwise, + * populate it immediately. */ + if (splitPort?.port === 0) { + completionPromise.then( + portNum => { + const finalUri: GrpcUri = { + scheme: portUri.scheme, + authority: portUri.authority, + path: combineHostPort({ host: splitPort.host, port: portNum }), + }; + boundPortObject!.mapKey = uriToString(finalUri); + boundPortObject!.completionPromise = null; + boundPortObject!.portNumber = portNum; + this.boundPorts.set(boundPortObject!.mapKey, boundPortObject!); + callback(null, portNum); + }, + error => { + callback(error, 0); + } + ); + } else { + this.boundPorts.set(boundPortObject.mapKey, boundPortObject); + completionPromise.then( + portNum => { + boundPortObject!.completionPromise = null; + boundPortObject!.portNumber = portNum; + callback(null, portNum); + }, + error => { + callback(error, 0); } + ); + } + } - http2Server.once('error', onError); + private closeServer(server: AnyHttp2Server, callback?: () => void) { + this.trace( + 'Closing server with address ' + JSON.stringify(server.address()) + ); + const serverInfo = this.http2Servers.get(server); + server.close(() => { + if (serverInfo) { + this.listenerChildrenTracker.unrefChild(serverInfo.channelzRef); + unregisterChannelzRef(serverInfo.channelzRef); + } + this.http2Servers.delete(server); + callback?.(); + }); + } - http2Server.listen(address, () => { - this.http2ServerList.push(http2Server); - resolve( - bindSpecificPort( - addressList.slice(1), - (http2Server.address() as AddressInfo).port, - 1 - ) - ); - http2Server.removeListener('error', onError); - }); - }); + private closeSession( + session: http2.ServerHttp2Session, + callback?: () => void + ) { + this.trace('Closing session initiated by ' + session.socket?.remoteAddress); + const sessionInfo = this.sessions.get(session); + const closeCallback = () => { + if (sessionInfo) { + this.sessionChildrenTracker.unrefChild(sessionInfo.ref); + unregisterChannelzRef(sessionInfo.ref); + } + callback?.(); }; + if (session.closed) { + queueMicrotask(closeCallback); + } else { + session.close(closeCallback); + } + } - const resolverListener: ResolverListener = { - onSuccessfulResolution: ( - addressList, - serviceConfig, - serviceConfigError - ) => { - // We only want one resolution result. 
Discard all future results - resolverListener.onSuccessfulResolution = () => {}; - if (addressList.length === 0) { - callback(new Error(`No addresses resolved for port ${port}`), 0); - return; - } - let bindResultPromise: Promise; - if (isTcpSubchannelAddress(addressList[0])) { - if (addressList[0].port === 0) { - bindResultPromise = bindWildcardPort(addressList); - } else { - bindResultPromise = bindSpecificPort( - addressList, - addressList[0].port, - 0 - ); - } - } else { - // Use an arbitrary non-zero port for non-TCP addresses - bindResultPromise = bindSpecificPort(addressList, 1, 0); + private completeUnbind(boundPortObject: BoundPort) { + for (const server of boundPortObject.listeningServers) { + const serverInfo = this.http2Servers.get(server); + this.closeServer(server, () => { + boundPortObject.listeningServers.delete(server); + }); + if (serverInfo) { + for (const session of serverInfo.sessions) { + this.closeSession(session); } - bindResultPromise.then( - (bindResult) => { - if (bindResult.count === 0) { - const errorString = `No address added out of total ${addressList.length} resolved`; - logging.log(LogVerbosity.ERROR, errorString); - callback(new Error(errorString), 0); - } else { - if (bindResult.count < addressList.length) { - logging.log( - LogVerbosity.INFO, - `WARNING Only ${bindResult.count} addresses added out of total ${addressList.length} resolved` - ); - } - callback(null, bindResult.port); - } - }, - (error) => { - const errorString = `No address added out of total ${addressList.length} resolved`; - logging.log(LogVerbosity.ERROR, errorString); - callback(new Error(errorString), 0); - } - ); - }, - onError: (error) => { - callback(new Error(error.details), 0); - }, - }; - - const resolver = createResolver(portUri, resolverListener, this.options); - resolver.updateResolution(); + } + } + this.boundPorts.delete(boundPortObject.mapKey); } - forceShutdown(): void { - // Close the server if it is still running. + /** + * Unbind a previously bound port, or cancel an in-progress bindAsync + * operation. If port 0 was bound, only the actual bound port can be + * unbound. For example, if bindAsync was called with "localhost:0" and the + * bound port result was 54321, it can be unbound as "localhost:54321". + * @param port + */ + unbind(port: string): void { + this.trace('unbind port=' + port); + const portUri = this.normalizePort(port); + const splitPort = splitHostPort(portUri.path); + if (splitPort?.port === 0) { + throw new Error('Cannot unbind port 0'); + } + const boundPortObject = this.boundPorts.get(uriToString(portUri)); + if (boundPortObject) { + this.trace( + 'unbinding ' + + boundPortObject.mapKey + + ' originally bound as ' + + uriToString(boundPortObject.originalUri) + ); + /* If the bind operation is pending, the cancelled flag will trigger + * the unbind operation later. */ + if (boundPortObject.completionPromise) { + boundPortObject.cancelled = true; + } else { + this.completeUnbind(boundPortObject); + } + } + } - for (const http2Server of this.http2ServerList) { - if (http2Server.listening) { - http2Server.close(); + /** + * Gracefully close all connections associated with a previously bound port. + * After the grace time, forcefully close all remaining open connections. + * + * If port 0 was bound, only the actual bound port can be + * drained. For example, if bindAsync was called with "localhost:0" and the + * bound port result was 54321, it can be drained as "localhost:54321". 
+ * @param port + * @param graceTimeMs + * @returns + */ + drain(port: string, graceTimeMs: number): void { + this.trace('drain port=' + port + ' graceTimeMs=' + graceTimeMs); + const portUri = this.normalizePort(port); + const splitPort = splitHostPort(portUri.path); + if (splitPort?.port === 0) { + throw new Error('Cannot drain port 0'); + } + const boundPortObject = this.boundPorts.get(uriToString(portUri)); + if (!boundPortObject) { + return; + } + const allSessions: Set = new Set(); + for (const http2Server of boundPortObject.listeningServers) { + const serverEntry = this.http2Servers.get(http2Server); + if (serverEntry) { + for (const session of serverEntry.sessions) { + allSessions.add(session); + this.closeSession(session, () => { + allSessions.delete(session); + }); + } } } + /* After the grace time ends, send another goaway to all remaining sessions + * with the CANCEL code. */ + setTimeout(() => { + for (const session of allSessions) { + session.destroy(http2.constants.NGHTTP2_CANCEL as any); + } + }, graceTimeMs).unref?.(); + } - this.started = false; + forceShutdown(): void { + for (const boundPortObject of this.boundPorts.values()) { + boundPortObject.cancelled = true; + } + this.boundPorts.clear(); + // Close the server if it is still running. + for (const server of this.http2Servers.keys()) { + this.closeServer(server); + } // Always destroy any available sessions. It's possible that one or more // tryShutdown() calls are in progress. Don't wait on them to finish. - this.sessions.forEach((session) => { + this.sessions.forEach((channelzInfo, session) => { + this.closeSession(session); // Cast NGHTTP2_CANCEL to any because TypeScript doesn't seem to // recognize destroy(code) as a valid signature. // eslint-disable-next-line @typescript-eslint/no-explicit-any session.destroy(http2.constants.NGHTTP2_CANCEL as any); }); this.sessions.clear(); + unregisterChannelzRef(this.channelzRef); + + this.shutdown = true; } register( @@ -482,12 +1054,16 @@ export class Server { return this.handlers.delete(name); } + /** + * @deprecated No longer needed as of version 1.10.x + */ + @deprecate( + 'Calling start() is no longer necessary. It can be safely omitted.' + ) start(): void { if ( - this.http2ServerList.length === 0 || - this.http2ServerList.every( - (http2Server) => http2Server.listening !== true - ) + this.http2Servers.size === 0 || + [...this.http2Servers.keys()].every(server => !server.listening) ) { throw new Error('server must be bound in order to start'); } @@ -495,46 +1071,273 @@ export class Server { if (this.started === true) { throw new Error('server is already started'); } - this.started = true; } tryShutdown(callback: (error?: Error) => void): void { + const wrappedCallback = (error?: Error) => { + unregisterChannelzRef(this.channelzRef); + callback(error); + }; let pendingChecks = 0; function maybeCallback(): void { pendingChecks--; if (pendingChecks === 0) { - callback(); + wrappedCallback(); } } + this.shutdown = true; + + for (const [serverKey, server] of this.http2Servers.entries()) { + pendingChecks++; + const serverString = server.channelzRef.name; + this.trace('Waiting for server ' + serverString + ' to close'); + this.closeServer(serverKey, () => { + this.trace('Server ' + serverString + ' finished closing'); + maybeCallback(); + }); - // Close the server if necessary. 
- this.started = false; - - for (const http2Server of this.http2ServerList) { - if (http2Server.listening) { + for (const session of server.sessions.keys()) { pendingChecks++; - http2Server.close(maybeCallback); + const sessionString = session.socket?.remoteAddress; + this.trace('Waiting for session ' + sessionString + ' to close'); + this.closeSession(session, () => { + this.trace('Session ' + sessionString + ' finished closing'); + maybeCallback(); + }); } } - this.sessions.forEach((session) => { - if (!session.closed) { - pendingChecks += 1; - session.close(maybeCallback); - } - }); if (pendingChecks === 0) { - callback(); + wrappedCallback(); } } - addHttp2Port(): void { + addHttp2Port(): never { throw new Error('Not yet implemented'); } + /** + * Get the channelz reference object for this server. The returned value is + * garbage if channelz is disabled for this server. + * @returns + */ + getChannelzRef() { + return this.channelzRef; + } + + private _verifyContentType( + stream: http2.ServerHttp2Stream, + headers: http2.IncomingHttpHeaders + ): boolean { + const contentType = headers[http2.constants.HTTP2_HEADER_CONTENT_TYPE]; + + if ( + typeof contentType !== 'string' || + !contentType.startsWith('application/grpc') + ) { + stream.respond( + { + [http2.constants.HTTP2_HEADER_STATUS]: + http2.constants.HTTP_STATUS_UNSUPPORTED_MEDIA_TYPE, + }, + { endStream: true } + ); + return false; + } + + return true; + } + + private _retrieveHandler(path: string): Handler | null { + this.trace( + 'Received call to method ' + + path + + ' at address ' + + this.serverAddressString + ); + + const handler = this.handlers.get(path); + + if (handler === undefined) { + this.trace( + 'No handler registered for method ' + + path + + '. Sending UNIMPLEMENTED status.' + ); + return null; + } + + return handler; + } + + private _respondWithError( + err: PartialStatusObject, + stream: http2.ServerHttp2Stream, + channelzSessionInfo: ChannelzSessionInfo | null = null + ) { + const trailersToSend = { + 'grpc-status': err.code ?? 
Status.INTERNAL, + 'grpc-message': err.details, + [http2.constants.HTTP2_HEADER_STATUS]: http2.constants.HTTP_STATUS_OK, + [http2.constants.HTTP2_HEADER_CONTENT_TYPE]: 'application/grpc+proto', + ...err.metadata?.toHttp2Headers(), + }; + stream.respond(trailersToSend, { endStream: true }); + + this.callTracker.addCallFailed(); + channelzSessionInfo?.streamTracker.addCallFailed(); + } + + private _channelzHandler( + stream: http2.ServerHttp2Stream, + headers: http2.IncomingHttpHeaders + ) { + // for handling idle timeout + this.onStreamOpened(stream); + + const channelzSessionInfo = this.sessions.get( + stream.session as http2.ServerHttp2Session + ); + + this.callTracker.addCallStarted(); + channelzSessionInfo?.streamTracker.addCallStarted(); + + if (!this._verifyContentType(stream, headers)) { + this.callTracker.addCallFailed(); + channelzSessionInfo?.streamTracker.addCallFailed(); + return; + } + + const path = headers[HTTP2_HEADER_PATH] as string; + + const handler = this._retrieveHandler(path); + if (!handler) { + this._respondWithError( + getUnimplementedStatusResponse(path), + stream, + channelzSessionInfo + ); + return; + } + + const callEventTracker: CallEventTracker = { + addMessageSent: () => { + if (channelzSessionInfo) { + channelzSessionInfo.messagesSent += 1; + channelzSessionInfo.lastMessageSentTimestamp = new Date(); + } + }, + addMessageReceived: () => { + if (channelzSessionInfo) { + channelzSessionInfo.messagesReceived += 1; + channelzSessionInfo.lastMessageReceivedTimestamp = new Date(); + } + }, + onCallEnd: status => { + if (status.code === Status.OK) { + this.callTracker.addCallSucceeded(); + } else { + this.callTracker.addCallFailed(); + } + }, + onStreamEnd: success => { + if (channelzSessionInfo) { + if (success) { + channelzSessionInfo.streamTracker.addCallSucceeded(); + } else { + channelzSessionInfo.streamTracker.addCallFailed(); + } + } + }, + }; + + const call = getServerInterceptingCall( + this.interceptors, + stream, + headers, + callEventTracker, + handler, + this.options + ); + + if (!this._runHandlerForCall(call, handler)) { + this.callTracker.addCallFailed(); + channelzSessionInfo?.streamTracker.addCallFailed(); + + call.sendStatus({ + code: Status.INTERNAL, + details: `Unknown handler type: ${handler.type}`, + }); + } + } + + private _streamHandler( + stream: http2.ServerHttp2Stream, + headers: http2.IncomingHttpHeaders + ) { + // for handling idle timeout + this.onStreamOpened(stream); + + if (this._verifyContentType(stream, headers) !== true) { + return; + } + + const path = headers[HTTP2_HEADER_PATH] as string; + + const handler = this._retrieveHandler(path); + if (!handler) { + this._respondWithError( + getUnimplementedStatusResponse(path), + stream, + null + ); + return; + } + + const call = getServerInterceptingCall( + this.interceptors, + stream, + headers, + null, + handler, + this.options + ); + + if (!this._runHandlerForCall(call, handler)) { + call.sendStatus({ + code: Status.INTERNAL, + details: `Unknown handler type: ${handler.type}`, + }); + } + } + + private _runHandlerForCall( + call: ServerInterceptingCallInterface, + handler: + | UntypedUnaryHandler + | UntypedClientStreamingHandler + | UntypedServerStreamingHandler + | UntypedBidiStreamingHandler + ): boolean { + const { type } = handler; + if (type === 'unary') { + handleUnary(call, handler); + } else if (type === 'clientStream') { + handleClientStreaming(call, handler); + } else if (type === 'serverStream') { + handleServerStreaming(call, handler); + } else if (type === 'bidi') { + 
handleBidiStreaming(call, handler); + } else { + return false; + } + + return true; + } + private _setupHandlers( http2Server: http2.Http2Server | http2.Http2SecureServer ): void { @@ -542,151 +1345,405 @@ export class Server { return; } - http2Server.on( - 'stream', - (stream: http2.ServerHttp2Stream, headers: http2.IncomingHttpHeaders) => { - const contentType = headers[http2.constants.HTTP2_HEADER_CONTENT_TYPE]; - - if ( - typeof contentType !== 'string' || - !contentType.startsWith('application/grpc') - ) { - stream.respond( - { - [http2.constants.HTTP2_HEADER_STATUS]: - http2.constants.HTTP_STATUS_UNSUPPORTED_MEDIA_TYPE, - }, - { endStream: true } - ); - return; - } + const serverAddress = http2Server.address(); + let serverAddressString = 'null'; + if (serverAddress) { + if (typeof serverAddress === 'string') { + serverAddressString = serverAddress; + } else { + serverAddressString = serverAddress.address + ':' + serverAddress.port; + } + } + this.serverAddressString = serverAddressString; - try { - const path = headers[http2.constants.HTTP2_HEADER_PATH] as string; - const serverAddress = http2Server.address(); - let serverAddressString = 'null'; - if (serverAddress) { - if (typeof serverAddress === 'string') { - serverAddressString = serverAddress; - } else { - serverAddressString = - serverAddress.address + ':' + serverAddress.port; - } - } - trace( - 'Received call to method ' + - path + - ' at address ' + - serverAddressString + const handler = this.channelzEnabled + ? this._channelzHandler + : this._streamHandler; + + const sessionHandler = this.channelzEnabled + ? this._channelzSessionHandler(http2Server) + : this._sessionHandler(http2Server); + + http2Server.on('stream', handler.bind(this)); + http2Server.on('session', sessionHandler); + } + + private _sessionHandler( + http2Server: http2.Http2Server | http2.Http2SecureServer + ) { + return (session: http2.ServerHttp2Session) => { + this.http2Servers.get(http2Server)?.sessions.add(session); + + let connectionAgeTimer: NodeJS.Timeout | null = null; + let connectionAgeGraceTimer: NodeJS.Timeout | null = null; + let keeapliveTimeTimer: NodeJS.Timeout | null = null; + let keepaliveTimeoutTimer: NodeJS.Timeout | null = null; + let sessionClosedByServer = false; + + const idleTimeoutObj = this.enableIdleTimeout(session); + + if (this.maxConnectionAgeMs !== UNLIMITED_CONNECTION_AGE_MS) { + // Apply a random jitter within a +/-10% range + const jitterMagnitude = this.maxConnectionAgeMs / 10; + const jitter = Math.random() * jitterMagnitude * 2 - jitterMagnitude; + + connectionAgeTimer = setTimeout(() => { + sessionClosedByServer = true; + + this.trace( + 'Connection dropped by max connection age: ' + + session.socket?.remoteAddress ); - const handler = this.handlers.get(path); - if (handler === undefined) { - trace( - 'No handler registered for method ' + - path + - '. Sending UNIMPLEMENTED status.' + try { + session.goaway( + http2.constants.NGHTTP2_NO_ERROR, + ~(1 << 31), + kMaxAge ); - throw getUnimplementedStatusResponse(path); + } catch (e) { + // The goaway can't be sent because the session is already closed + session.destroy(); + return; } + session.close(); + + /* Allow a grace period after sending the GOAWAY before forcibly + * closing the connection. 
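 *
 * Illustrative sketch, not part of this patch: both limits are driven by
 * server channel options, assuming the standard option names, e.g.
 *
 *   const server = new Server({
 *     'grpc.max_connection_age_ms': 5 * 60 * 1000,    // drop after ~5 min (+/-10% jitter)
 *     'grpc.max_connection_age_grace_ms': 30 * 1000,  // then allow 30s to drain
 *   });
 *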
*/ + if (this.maxConnectionAgeGraceMs !== UNLIMITED_CONNECTION_AGE_MS) { + connectionAgeGraceTimer = setTimeout(() => { + session.destroy(); + }, this.maxConnectionAgeGraceMs); + connectionAgeGraceTimer.unref?.(); + } + }, this.maxConnectionAgeMs + jitter); + connectionAgeTimer.unref?.(); + } - const call = new Http2ServerCallStream(stream, handler, this.options); - const metadata: Metadata = call.receiveMetadata(headers) as Metadata; - switch (handler.type) { - case 'unary': - handleUnary(call, handler as UntypedUnaryHandler, metadata); - break; - case 'clientStream': - handleClientStreaming( - call, - handler as UntypedClientStreamingHandler, - metadata - ); - break; - case 'serverStream': - handleServerStreaming( - call, - handler as UntypedServerStreamingHandler, - metadata - ); - break; - case 'bidi': - handleBidiStreaming( - call, - handler as UntypedBidiStreamingHandler, - metadata - ); - break; - default: - throw new Error(`Unknown handler type: ${handler.type}`); + if (this.keepaliveTimeMs < KEEPALIVE_MAX_TIME_MS) { + keeapliveTimeTimer = setInterval(() => { + keepaliveTimeoutTimer = setTimeout(() => { + sessionClosedByServer = true; + session.close(); + }, this.keepaliveTimeoutMs); + keepaliveTimeoutTimer.unref?.(); + + try { + session.ping( + (err: Error | null, duration: number, payload: Buffer) => { + if (keepaliveTimeoutTimer) { + clearTimeout(keepaliveTimeoutTimer); + } + + if (err) { + sessionClosedByServer = true; + this.trace( + 'Connection dropped due to error of a ping frame ' + + err.message + + ' return in ' + + duration + ); + session.close(); + } + } + ); + } catch (e) { + clearTimeout(keepaliveTimeoutTimer); + // The ping can't be sent because the session is already closed + session.destroy(); } - } catch (err) { - const call = new Http2ServerCallStream(stream, null!, this.options); + }, this.keepaliveTimeMs); + keeapliveTimeTimer.unref?.(); + } - if (err.code === undefined) { - err.code = Status.INTERNAL; + session.on('close', () => { + if (!sessionClosedByServer) { + this.trace( + `Connection dropped by client ${session.socket?.remoteAddress}` + ); + } + + if (connectionAgeTimer) { + clearTimeout(connectionAgeTimer); + } + + if (connectionAgeGraceTimer) { + clearTimeout(connectionAgeGraceTimer); + } + + if (keeapliveTimeTimer) { + clearInterval(keeapliveTimeTimer); + if (keepaliveTimeoutTimer) { + clearTimeout(keepaliveTimeoutTimer); } + } - call.sendError(err); + if (idleTimeoutObj !== null) { + clearTimeout(idleTimeoutObj.timeout); + this.sessionIdleTimeouts.delete(session); } - } - ); - http2Server.on('session', (session) => { - if (!this.started) { - session.destroy(); - return; + this.http2Servers.get(http2Server)?.sessions.delete(session); + }); + }; + } + + private _channelzSessionHandler( + http2Server: http2.Http2Server | http2.Http2SecureServer + ) { + return (session: http2.ServerHttp2Session) => { + const channelzRef = registerChannelzSocket( + session.socket?.remoteAddress ?? 
'unknown', + this.getChannelzSessionInfo.bind(this, session), + this.channelzEnabled + ); + + const channelzSessionInfo: ChannelzSessionInfo = { + ref: channelzRef, + streamTracker: new ChannelzCallTracker(), + messagesSent: 0, + messagesReceived: 0, + keepAlivesSent: 0, + lastMessageSentTimestamp: null, + lastMessageReceivedTimestamp: null, + }; + + this.http2Servers.get(http2Server)?.sessions.add(session); + this.sessions.set(session, channelzSessionInfo); + const clientAddress = `${session.socket.remoteAddress}:${session.socket.remotePort}`; + + this.channelzTrace.addTrace( + 'CT_INFO', + 'Connection established by client ' + clientAddress + ); + this.trace('Connection established by client ' + clientAddress); + this.sessionChildrenTracker.refChild(channelzRef); + + let connectionAgeTimer: NodeJS.Timeout | null = null; + let connectionAgeGraceTimer: NodeJS.Timeout | null = null; + let keeapliveTimeTimer: NodeJS.Timeout | null = null; + let keepaliveTimeoutTimer: NodeJS.Timeout | null = null; + let sessionClosedByServer = false; + + const idleTimeoutObj = this.enableIdleTimeout(session); + + if (this.maxConnectionAgeMs !== UNLIMITED_CONNECTION_AGE_MS) { + // Apply a random jitter within a +/-10% range + const jitterMagnitude = this.maxConnectionAgeMs / 10; + const jitter = Math.random() * jitterMagnitude * 2 - jitterMagnitude; + + connectionAgeTimer = setTimeout(() => { + sessionClosedByServer = true; + this.channelzTrace.addTrace( + 'CT_INFO', + 'Connection dropped by max connection age from ' + clientAddress + ); + + try { + session.goaway( + http2.constants.NGHTTP2_NO_ERROR, + ~(1 << 31), + kMaxAge + ); + } catch (e) { + // The goaway can't be sent because the session is already closed + session.destroy(); + return; + } + session.close(); + + /* Allow a grace period after sending the GOAWAY before forcibly + * closing the connection. 
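 *
 * Illustrative sketch, not part of this patch: the keepalive ping loop below
 * is likewise configured through server channel options, assuming the
 * standard option names, e.g.
 *
 *   const server = new Server({
 *     'grpc.keepalive_time_ms': 2 * 60 * 1000,  // ping each connection every 2 min
 *     'grpc.keepalive_timeout_ms': 20 * 1000,   // drop it if no ack within 20s
 *   });
 *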
*/ + if (this.maxConnectionAgeGraceMs !== UNLIMITED_CONNECTION_AGE_MS) { + connectionAgeGraceTimer = setTimeout(() => { + session.destroy(); + }, this.maxConnectionAgeGraceMs); + connectionAgeGraceTimer.unref?.(); + } + }, this.maxConnectionAgeMs + jitter); + connectionAgeTimer.unref?.(); } - this.sessions.add(session); + if (this.keepaliveTimeMs < KEEPALIVE_MAX_TIME_MS) { + keeapliveTimeTimer = setInterval(() => { + keepaliveTimeoutTimer = setTimeout(() => { + sessionClosedByServer = true; + this.channelzTrace.addTrace( + 'CT_INFO', + 'Connection dropped by keepalive timeout from ' + clientAddress + ); + + session.close(); + }, this.keepaliveTimeoutMs); + keepaliveTimeoutTimer.unref?.(); + + try { + session.ping( + (err: Error | null, duration: number, payload: Buffer) => { + if (keepaliveTimeoutTimer) { + clearTimeout(keepaliveTimeoutTimer); + } + + if (err) { + sessionClosedByServer = true; + this.channelzTrace.addTrace( + 'CT_INFO', + 'Connection dropped due to error of a ping frame ' + + err.message + + ' return in ' + + duration + ); + + session.close(); + } + } + ); + channelzSessionInfo.keepAlivesSent += 1; + } catch (e) { + clearTimeout(keepaliveTimeoutTimer); + // The ping can't be sent because the session is already closed + session.destroy(); + } + }, this.keepaliveTimeMs); + keeapliveTimeTimer.unref?.(); + } session.on('close', () => { + if (!sessionClosedByServer) { + this.channelzTrace.addTrace( + 'CT_INFO', + 'Connection dropped by client ' + clientAddress + ); + } + + this.sessionChildrenTracker.unrefChild(channelzRef); + unregisterChannelzRef(channelzRef); + + if (connectionAgeTimer) { + clearTimeout(connectionAgeTimer); + } + + if (connectionAgeGraceTimer) { + clearTimeout(connectionAgeGraceTimer); + } + + if (keeapliveTimeTimer) { + clearInterval(keeapliveTimeTimer); + if (keepaliveTimeoutTimer) { + clearTimeout(keepaliveTimeoutTimer); + } + } + + if (idleTimeoutObj !== null) { + clearTimeout(idleTimeoutObj.timeout); + this.sessionIdleTimeouts.delete(session); + } + + this.http2Servers.get(http2Server)?.sessions.delete(session); this.sessions.delete(session); }); - }); + }; } -} -async function handleUnary( - call: Http2ServerCallStream, - handler: UnaryHandler, - metadata: Metadata -): Promise { - const request = await call.receiveUnaryMessage(); + private enableIdleTimeout( + session: http2.ServerHttp2Session + ): SessionIdleTimeoutTracker | null { + if (this.sessionIdleTimeout >= MAX_CONNECTION_IDLE_MS) { + return null; + } + + const idleTimeoutObj: SessionIdleTimeoutTracker = { + activeStreams: 0, + lastIdle: Date.now(), + onClose: this.onStreamClose.bind(this, session), + timeout: setTimeout( + this.onIdleTimeout, + this.sessionIdleTimeout, + this, + session + ), + }; + idleTimeoutObj.timeout.unref?.(); + this.sessionIdleTimeouts.set(session, idleTimeoutObj); + + const { socket } = session; + this.trace( + 'Enable idle timeout for ' + + socket.remoteAddress + + ':' + + socket.remotePort + ); + + return idleTimeoutObj; + } + + private onIdleTimeout( + this: undefined, + ctx: Server, + session: http2.ServerHttp2Session + ) { + const { socket } = session; + const sessionInfo = ctx.sessionIdleTimeouts.get(session); + + // if it is called while we have activeStreams - timer will not be rescheduled + // until last active stream is closed, then it will call .refresh() on the timer + // important part is to not clearTimeout(timer) or it becomes unusable + // for future refreshes + if ( + sessionInfo !== undefined && + sessionInfo.activeStreams === 0 && + Date.now() - 
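// Illustrative sketch, not part of this patch: the idle tracking around this
// check is driven by the session idle timeout option (assuming the standard
// 'grpc.max_connection_idle_ms' name), e.g.
//   const server = new Server({ 'grpc.max_connection_idle_ms': 5 * 60 * 1000 });
// A session with no active streams for that long is closed.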
sessionInfo.lastIdle >= ctx.sessionIdleTimeout + ) { + ctx.trace( + 'Session idle timeout triggered for ' + + socket?.remoteAddress + + ':' + + socket?.remotePort + + ' last idle at ' + + sessionInfo.lastIdle + ); - if (request === undefined || call.cancelled) { - return; + ctx.closeSession(session); + } } - const emitter = new ServerUnaryCallImpl( - call, - metadata, - request - ); + private onStreamOpened(stream: http2.ServerHttp2Stream) { + const session = stream.session as http2.ServerHttp2Session; - handler.func( - emitter, - ( - err: ServerErrorResponse | ServerStatusResponse | null, - value?: ResponseType | null, - trailer?: Metadata, - flags?: number - ) => { - call.sendUnaryMessage(err, value, trailer, flags); + const idleTimeoutObj = this.sessionIdleTimeouts.get(session); + if (idleTimeoutObj) { + idleTimeoutObj.activeStreams += 1; + stream.once('close', idleTimeoutObj.onClose); + } + } + + private onStreamClose(session: http2.ServerHttp2Session) { + const idleTimeoutObj = this.sessionIdleTimeouts.get(session); + + if (idleTimeoutObj) { + idleTimeoutObj.activeStreams -= 1; + if (idleTimeoutObj.activeStreams === 0) { + idleTimeoutObj.lastIdle = Date.now(); + idleTimeoutObj.timeout.refresh(); + + this.trace( + 'Session onStreamClose' + + session.socket?.remoteAddress + + ':' + + session.socket?.remotePort + + ' at ' + + idleTimeoutObj.lastIdle + ); + } } - ); + } } -function handleClientStreaming( - call: Http2ServerCallStream, - handler: ClientStreamingHandler, - metadata: Metadata -): void { - const stream = new ServerReadableStreamImpl( - call, - metadata, - handler.deserialize - ); +async function handleUnary( + call: ServerInterceptingCallInterface, + handler: UnaryHandler +): Promise { + let stream: ServerUnaryCall; function respond( err: ServerErrorResponse | ServerStatusResponse | null, @@ -694,54 +1751,225 @@ function handleClientStreaming( trailer?: Metadata, flags?: number ) { - stream.destroy(); - call.sendUnaryMessage(err, value, trailer, flags); - } - - if (call.cancelled) { - return; + if (err) { + call.sendStatus(serverErrorToStatus(err, trailer)); + return; + } + call.sendMessage(value, () => { + call.sendStatus({ + code: Status.OK, + details: 'OK', + metadata: trailer ?? 
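// Illustrative sketch, not part of this patch: this respond() callback is what
// an application-level unary handler ultimately invokes. A hypothetical handler
// (HelloRequest/HelloReply are placeholder message types) looks like:
//   function sayHello(
//     call: ServerUnaryCall<HelloRequest, HelloReply>,
//     callback: sendUnaryData<HelloReply>
//   ) {
//     callback(null, { message: `Hello ${call.request.name}` });
//   }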
null, + }); + }); } - stream.on('error', respond); - handler.func(stream, respond); + let requestMetadata: Metadata; + let requestMessage: RequestType | null = null; + call.start({ + onReceiveMetadata(metadata) { + requestMetadata = metadata; + call.startRead(); + }, + onReceiveMessage(message) { + if (requestMessage) { + call.sendStatus({ + code: Status.UNIMPLEMENTED, + details: `Received a second request message for server streaming method ${handler.path}`, + metadata: null, + }); + return; + } + requestMessage = message; + call.startRead(); + }, + onReceiveHalfClose() { + if (!requestMessage) { + call.sendStatus({ + code: Status.UNIMPLEMENTED, + details: `Received no request message for server streaming method ${handler.path}`, + metadata: null, + }); + return; + } + stream = new ServerWritableStreamImpl( + handler.path, + call, + requestMetadata, + requestMessage + ); + try { + handler.func(stream, respond); + } catch (err) { + call.sendStatus({ + code: Status.UNKNOWN, + details: `Server method handler threw error ${ + (err as Error).message + }`, + metadata: null, + }); + } + }, + onCancel() { + if (stream) { + stream.cancelled = true; + stream.emit('cancelled', 'cancelled'); + } + }, + }); } -async function handleServerStreaming( - call: Http2ServerCallStream, - handler: ServerStreamingHandler, - metadata: Metadata -): Promise { - const request = await call.receiveUnaryMessage(); +function handleClientStreaming( + call: ServerInterceptingCallInterface, + handler: ClientStreamingHandler +): void { + let stream: ServerReadableStream; - if (request === undefined || call.cancelled) { - return; + function respond( + err: ServerErrorResponse | ServerStatusResponse | null, + value?: ResponseType | null, + trailer?: Metadata, + flags?: number + ) { + if (err) { + call.sendStatus(serverErrorToStatus(err, trailer)); + return; + } + call.sendMessage(value, () => { + call.sendStatus({ + code: Status.OK, + details: 'OK', + metadata: trailer ?? 
null, + }); + }); } - const stream = new ServerWritableStreamImpl( - call, - metadata, - handler.serialize, - request - ); + call.start({ + onReceiveMetadata(metadata) { + stream = new ServerDuplexStreamImpl(handler.path, call, metadata); + try { + handler.func(stream, respond); + } catch (err) { + call.sendStatus({ + code: Status.UNKNOWN, + details: `Server method handler threw error ${ + (err as Error).message + }`, + metadata: null, + }); + } + }, + onReceiveMessage(message) { + stream.push(message); + }, + onReceiveHalfClose() { + stream.push(null); + }, + onCancel() { + if (stream) { + stream.cancelled = true; + stream.emit('cancelled', 'cancelled'); + stream.destroy(); + } + }, + }); +} - handler.func(stream); +function handleServerStreaming( + call: ServerInterceptingCallInterface, + handler: ServerStreamingHandler +): void { + let stream: ServerWritableStream; + + let requestMetadata: Metadata; + let requestMessage: RequestType | null = null; + call.start({ + onReceiveMetadata(metadata) { + requestMetadata = metadata; + call.startRead(); + }, + onReceiveMessage(message) { + if (requestMessage) { + call.sendStatus({ + code: Status.UNIMPLEMENTED, + details: `Received a second request message for server streaming method ${handler.path}`, + metadata: null, + }); + return; + } + requestMessage = message; + call.startRead(); + }, + onReceiveHalfClose() { + if (!requestMessage) { + call.sendStatus({ + code: Status.UNIMPLEMENTED, + details: `Received no request message for server streaming method ${handler.path}`, + metadata: null, + }); + return; + } + stream = new ServerWritableStreamImpl( + handler.path, + call, + requestMetadata, + requestMessage + ); + try { + handler.func(stream); + } catch (err) { + call.sendStatus({ + code: Status.UNKNOWN, + details: `Server method handler threw error ${ + (err as Error).message + }`, + metadata: null, + }); + } + }, + onCancel() { + if (stream) { + stream.cancelled = true; + stream.emit('cancelled', 'cancelled'); + stream.destroy(); + } + }, + }); } function handleBidiStreaming( - call: Http2ServerCallStream, - handler: BidiStreamingHandler, - metadata: Metadata + call: ServerInterceptingCallInterface, + handler: BidiStreamingHandler ): void { - const stream = new ServerDuplexStreamImpl( - call, - metadata, - handler.serialize, - handler.deserialize - ); - - if (call.cancelled) { - return; - } - - handler.func(stream); + let stream: ServerDuplexStream; + + call.start({ + onReceiveMetadata(metadata) { + stream = new ServerDuplexStreamImpl(handler.path, call, metadata); + try { + handler.func(stream); + } catch (err) { + call.sendStatus({ + code: Status.UNKNOWN, + details: `Server method handler threw error ${ + (err as Error).message + }`, + metadata: null, + }); + } + }, + onReceiveMessage(message) { + stream.push(message); + }, + onReceiveHalfClose() { + stream.push(null); + }, + onCancel() { + if (stream) { + stream.cancelled = true; + stream.emit('cancelled', 'cancelled'); + stream.destroy(); + } + }, + }); } diff --git a/packages/grpc-js/src/service-config.ts b/packages/grpc-js/src/service-config.ts index ed225e087..b0d0d5576 100644 --- a/packages/grpc-js/src/service-config.ts +++ b/packages/grpc-js/src/service-config.ts @@ -27,25 +27,52 @@ /* eslint-disable @typescript-eslint/no-explicit-any */ import * as os from 'os'; -import { LoadBalancingConfig, validateLoadBalancingConfig } from './load-balancer'; +import { Status } from './constants'; +import { Duration } from './duration'; export interface MethodConfigName { - service: string; + 
service?: string; method?: string; } +export interface RetryPolicy { + maxAttempts: number; + initialBackoff: string; + maxBackoff: string; + backoffMultiplier: number; + retryableStatusCodes: (Status | string)[]; +} + +export interface HedgingPolicy { + maxAttempts: number; + hedgingDelay?: string; + nonFatalStatusCodes?: (Status | string)[]; +} + export interface MethodConfig { name: MethodConfigName[]; waitForReady?: boolean; - timeout?: string; + timeout?: Duration; maxRequestBytes?: number; maxResponseBytes?: number; + retryPolicy?: RetryPolicy; + hedgingPolicy?: HedgingPolicy; +} + +export interface RetryThrottling { + maxTokens: number; + tokenRatio: number; +} + +export interface LoadBalancingConfig { + [key: string]: object; } export interface ServiceConfig { loadBalancingPolicy?: string; loadBalancingConfig: LoadBalancingConfig[]; methodConfig: MethodConfig[]; + retryThrottling?: RetryThrottling; } export interface ServiceConfigCanaryConfig { @@ -59,7 +86,7 @@ export interface ServiceConfigCanaryConfig { * Recognizes a number with up to 9 digits after the decimal point, followed by * an "s", representing a number of seconds. */ -const TIMEOUT_REGEX = /^\d+(\.\d{1,9})?s$/; +const DURATION_REGEX = /^\d+(\.\d{1,9})?s$/; /** * Client language name used for determining whether this client matches a @@ -68,19 +95,164 @@ const TIMEOUT_REGEX = /^\d+(\.\d{1,9})?s$/; const CLIENT_LANGUAGE_STRING = 'node'; function validateName(obj: any): MethodConfigName { - if (!('service' in obj) || typeof obj.service !== 'string') { - throw new Error('Invalid method config name: invalid service'); + // In this context, and unset field and '' are considered the same + if ('service' in obj && obj.service !== '') { + if (typeof obj.service !== 'string') { + throw new Error( + `Invalid method config name: invalid service: expected type string, got ${typeof obj.service}` + ); + } + if ('method' in obj && obj.method !== '') { + if (typeof obj.method !== 'string') { + throw new Error( + `Invalid method config name: invalid method: expected type string, got ${typeof obj.service}` + ); + } + return { + service: obj.service, + method: obj.method, + }; + } else { + return { + service: obj.service, + }; + } + } else { + if ('method' in obj && obj.method !== undefined) { + throw new Error( + `Invalid method config name: method set with empty or unset service` + ); + } + return {}; } - const result: MethodConfigName = { - service: obj.service, - }; - if ('method' in obj) { - if (typeof obj.method === 'string') { - result.method = obj.method; +} + +function validateRetryPolicy(obj: any): RetryPolicy { + if ( + !('maxAttempts' in obj) || + !Number.isInteger(obj.maxAttempts) || + obj.maxAttempts < 2 + ) { + throw new Error( + 'Invalid method config retry policy: maxAttempts must be an integer at least 2' + ); + } + if ( + !('initialBackoff' in obj) || + typeof obj.initialBackoff !== 'string' || + !DURATION_REGEX.test(obj.initialBackoff) + ) { + throw new Error( + 'Invalid method config retry policy: initialBackoff must be a string consisting of a positive integer followed by s' + ); + } + if ( + !('maxBackoff' in obj) || + typeof obj.maxBackoff !== 'string' || + !DURATION_REGEX.test(obj.maxBackoff) + ) { + throw new Error( + 'Invalid method config retry policy: maxBackoff must be a string consisting of a positive integer followed by s' + ); + } + if ( + !('backoffMultiplier' in obj) || + typeof obj.backoffMultiplier !== 'number' || + obj.backoffMultiplier <= 0 + ) { + throw new Error( + 'Invalid method config retry 
policy: backoffMultiplier must be a number greater than 0' + ); + } + if ( + !('retryableStatusCodes' in obj && Array.isArray(obj.retryableStatusCodes)) + ) { + throw new Error( + 'Invalid method config retry policy: retryableStatusCodes is required' + ); + } + if (obj.retryableStatusCodes.length === 0) { + throw new Error( + 'Invalid method config retry policy: retryableStatusCodes must be non-empty' + ); + } + for (const value of obj.retryableStatusCodes) { + if (typeof value === 'number') { + if (!Object.values(Status).includes(value)) { + throw new Error( + 'Invalid method config retry policy: retryableStatusCodes value not in status code range' + ); + } + } else if (typeof value === 'string') { + if (!Object.values(Status).includes(value.toUpperCase())) { + throw new Error( + 'Invalid method config retry policy: retryableStatusCodes value not a status code name' + ); + } } else { - throw new Error('Invalid method config name: invalid method'); + throw new Error( + 'Invalid method config retry policy: retryableStatusCodes value must be a string or number' + ); + } + } + return { + maxAttempts: obj.maxAttempts, + initialBackoff: obj.initialBackoff, + maxBackoff: obj.maxBackoff, + backoffMultiplier: obj.backoffMultiplier, + retryableStatusCodes: obj.retryableStatusCodes, + }; +} + +function validateHedgingPolicy(obj: any): HedgingPolicy { + if ( + !('maxAttempts' in obj) || + !Number.isInteger(obj.maxAttempts) || + obj.maxAttempts < 2 + ) { + throw new Error( + 'Invalid method config hedging policy: maxAttempts must be an integer at least 2' + ); + } + if ( + 'hedgingDelay' in obj && + (typeof obj.hedgingDelay !== 'string' || + !DURATION_REGEX.test(obj.hedgingDelay)) + ) { + throw new Error( + 'Invalid method config hedging policy: hedgingDelay must be a string consisting of a positive integer followed by s' + ); + } + if ('nonFatalStatusCodes' in obj && Array.isArray(obj.nonFatalStatusCodes)) { + for (const value of obj.nonFatalStatusCodes) { + if (typeof value === 'number') { + if (!Object.values(Status).includes(value)) { + throw new Error( + 'Invlid method config hedging policy: nonFatalStatusCodes value not in status code range' + ); + } + } else if (typeof value === 'string') { + if (!Object.values(Status).includes(value.toUpperCase())) { + throw new Error( + 'Invlid method config hedging policy: nonFatalStatusCodes value not a status code name' + ); + } + } else { + throw new Error( + 'Invlid method config hedging policy: nonFatalStatusCodes value must be a string or number' + ); + } } } + const result: HedgingPolicy = { + maxAttempts: obj.maxAttempts, + }; + if (obj.hedgingDelay) { + result.hedgingDelay = obj.hedgingDelay; + } + if (obj.nonFatalStatusCodes) { + result.nonFatalStatusCodes = obj.nonFatalStatusCodes; + } return result; } @@ -101,13 +273,34 @@ function validateMethodConfig(obj: any): MethodConfig { result.waitForReady = obj.waitForReady; } if ('timeout' in obj) { - if ( - !(typeof obj.timeout === 'string') || - !TIMEOUT_REGEX.test(obj.timeout) + if (typeof obj.timeout === 'object') { + if ( + !('seconds' in obj.timeout) || + !(typeof obj.timeout.seconds === 'number') + ) { + throw new Error('Invalid method config: invalid timeout.seconds'); + } + if ( + !('nanos' in obj.timeout) || + !(typeof obj.timeout.nanos === 'number') + ) { + throw new Error('Invalid method config: invalid timeout.nanos'); + } + result.timeout = obj.timeout; + } else if ( + typeof obj.timeout === 'string' && + DURATION_REGEX.test(obj.timeout) ) { + const timeoutParts = obj.timeout + 
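// Illustrative sketch, not part of this patch: a service config that the
// validators in this file accept, typically supplied through the
// 'grpc.service_config' channel option (the service name is a placeholder):
//   {
//     "methodConfig": [{
//       "name": [{ "service": "example.Echo" }],
//       "timeout": "10s",
//       "retryPolicy": {
//         "maxAttempts": 3,
//         "initialBackoff": "0.1s",
//         "maxBackoff": "1s",
//         "backoffMultiplier": 2,
//         "retryableStatusCodes": ["UNAVAILABLE"]
//       }
//     }],
//     "retryThrottling": { "maxTokens": 100, "tokenRatio": 0.5 }
//   }
// A "hedgingPolicy" (maxAttempts plus optional hedgingDelay and
// nonFatalStatusCodes) may be given instead of "retryPolicy", but not both.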
.substring(0, obj.timeout.length - 1) + .split('.'); + result.timeout = { + seconds: timeoutParts[0] | 0, + nanos: (timeoutParts[1] ?? 0) | 0, + }; + } else { throw new Error('Invalid method config: invalid timeout'); } - result.timeout = obj.timeout; } if ('maxRequestBytes' in obj) { if (typeof obj.maxRequestBytes !== 'number') { @@ -121,9 +314,68 @@ function validateMethodConfig(obj: any): MethodConfig { } result.maxResponseBytes = obj.maxResponseBytes; } + if ('retryPolicy' in obj) { + if ('hedgingPolicy' in obj) { + throw new Error( + 'Invalid method config: retryPolicy and hedgingPolicy cannot both be specified' + ); + } else { + result.retryPolicy = validateRetryPolicy(obj.retryPolicy); + } + } else if ('hedgingPolicy' in obj) { + result.hedgingPolicy = validateHedgingPolicy(obj.hedgingPolicy); + } return result; } +export function validateRetryThrottling(obj: any): RetryThrottling { + if ( + !('maxTokens' in obj) || + typeof obj.maxTokens !== 'number' || + obj.maxTokens <= 0 || + obj.maxTokens > 1000 + ) { + throw new Error( + 'Invalid retryThrottling: maxTokens must be a number in (0, 1000]' + ); + } + if ( + !('tokenRatio' in obj) || + typeof obj.tokenRatio !== 'number' || + obj.tokenRatio <= 0 + ) { + throw new Error( + 'Invalid retryThrottling: tokenRatio must be a number greater than 0' + ); + } + return { + maxTokens: +(obj.maxTokens as number).toFixed(3), + tokenRatio: +(obj.tokenRatio as number).toFixed(3), + }; +} + +function validateLoadBalancingConfig(obj: any): LoadBalancingConfig { + if (!(typeof obj === 'object' && obj !== null)) { + throw new Error( + `Invalid loadBalancingConfig: unexpected type ${typeof obj}` + ); + } + const keys = Object.keys(obj); + if (keys.length > 1) { + throw new Error( + `Invalid loadBalancingConfig: unexpected multiple keys ${keys}` + ); + } + if (keys.length === 0) { + throw new Error( + 'Invalid loadBalancingConfig: load balancing policy name required' + ); + } + return { + [keys[0]]: obj[keys[0]], + }; +} + export function validateServiceConfig(obj: any): ServiceConfig { const result: ServiceConfig = { loadBalancingConfig: [], @@ -152,6 +404,9 @@ export function validateServiceConfig(obj: any): ServiceConfig { } } } + if ('retryThrottling' in obj) { + result.retryThrottling = validateRetryThrottling(obj.retryThrottling); + } // Validate method name uniqueness const seenMethodNames: MethodConfigName[] = []; for (const methodConfig of result.methodConfig) { diff --git a/packages/grpc-js/src/status-builder.ts b/packages/grpc-js/src/status-builder.ts index 1109af1ac..78e2ea310 100644 --- a/packages/grpc-js/src/status-builder.ts +++ b/packages/grpc-js/src/status-builder.ts @@ -15,7 +15,7 @@ * */ -import { StatusObject } from './call-stream'; +import { StatusObject } from './call-interface'; import { Status } from './constants'; import { Metadata } from './metadata'; diff --git a/packages/grpc-js/src/stream-decoder.ts b/packages/grpc-js/src/stream-decoder.ts index 671ad41ae..ea669d14c 100644 --- a/packages/grpc-js/src/stream-decoder.ts +++ b/packages/grpc-js/src/stream-decoder.ts @@ -30,6 +30,8 @@ export class StreamDecoder { private readPartialMessage: Buffer[] = []; private readMessageRemaining = 0; + constructor(private maxReadMessageLength: number) {} + write(data: Buffer): Buffer[] { let readHead = 0; let toRead: number; @@ -60,6 +62,9 @@ export class StreamDecoder { // readSizeRemaining >=0 here if (this.readSizeRemaining === 0) { this.readMessageSize = this.readPartialSize.readUInt32BE(0); + if (this.maxReadMessageLength !== -1 && 
this.readMessageSize > this.maxReadMessageLength) { + throw new Error(`Received message larger than max (${this.readMessageSize} vs ${this.maxReadMessageLength})`); + } this.readMessageRemaining = this.readMessageSize; if (this.readMessageRemaining > 0) { this.readState = ReadState.READING_MESSAGE; diff --git a/packages/grpc-js/src/subchannel-address.ts b/packages/grpc-js/src/subchannel-address.ts new file mode 100644 index 000000000..7e4f3e475 --- /dev/null +++ b/packages/grpc-js/src/subchannel-address.ts @@ -0,0 +1,252 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import { isIP, isIPv6 } from 'net'; + +export interface TcpSubchannelAddress { + port: number; + host: string; +} + +export interface IpcSubchannelAddress { + path: string; +} +/** + * This represents a single backend address to connect to. This interface is a + * subset of net.SocketConnectOpts, i.e. the options described at + * https://nodejs.org/api/net.html#net_socket_connect_options_connectlistener. + * Those are in turn a subset of the options that can be passed to http2.connect. + */ + +export type SubchannelAddress = TcpSubchannelAddress | IpcSubchannelAddress; + +export function isTcpSubchannelAddress( + address: SubchannelAddress +): address is TcpSubchannelAddress { + return 'port' in address; +} + +export function subchannelAddressEqual( + address1?: SubchannelAddress, + address2?: SubchannelAddress +): boolean { + if (!address1 && !address2) { + return true; + } + if (!address1 || !address2) { + return false; + } + if (isTcpSubchannelAddress(address1)) { + return ( + isTcpSubchannelAddress(address2) && + address1.host === address2.host && + address1.port === address2.port + ); + } else { + return !isTcpSubchannelAddress(address2) && address1.path === address2.path; + } +} + +export function subchannelAddressToString(address: SubchannelAddress): string { + if (isTcpSubchannelAddress(address)) { + if (isIPv6(address.host)) { + return '[' + address.host + ']:' + address.port; + } else { + return address.host + ':' + address.port; + } + } else { + return address.path; + } +} + +const DEFAULT_PORT = 443; + +export function stringToSubchannelAddress( + addressString: string, + port?: number +): SubchannelAddress { + if (isIP(addressString)) { + return { + host: addressString, + port: port ?? 
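// Illustrative sketch, not part of this patch: expected behavior of the
// address helpers defined in this file, e.g.
//   subchannelAddressToString({ host: '10.0.0.1', port: 80 })  -> '10.0.0.1:80'
//   subchannelAddressToString({ host: '::1', port: 443 })      -> '[::1]:443'
//   subchannelAddressToString({ path: '/tmp/grpc.sock' })      -> '/tmp/grpc.sock'
//   stringToSubchannelAddress('127.0.0.1', 8080)               -> { host: '127.0.0.1', port: 8080 }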
DEFAULT_PORT, + }; + } else { + return { + path: addressString, + }; + } +} + +export interface Endpoint { + addresses: SubchannelAddress[]; +} + +export function endpointEqual(endpoint1: Endpoint, endpoint2: Endpoint) { + if (endpoint1.addresses.length !== endpoint2.addresses.length) { + return false; + } + for (let i = 0; i < endpoint1.addresses.length; i++) { + if ( + !subchannelAddressEqual(endpoint1.addresses[i], endpoint2.addresses[i]) + ) { + return false; + } + } + return true; +} + +export function endpointToString(endpoint: Endpoint): string { + return ( + '[' + endpoint.addresses.map(subchannelAddressToString).join(', ') + ']' + ); +} + +export function endpointHasAddress( + endpoint: Endpoint, + expectedAddress: SubchannelAddress +): boolean { + for (const address of endpoint.addresses) { + if (subchannelAddressEqual(address, expectedAddress)) { + return true; + } + } + return false; +} + +interface EndpointMapEntry { + key: Endpoint; + value: ValueType; +} + +function endpointEqualUnordered( + endpoint1: Endpoint, + endpoint2: Endpoint +): boolean { + if (endpoint1.addresses.length !== endpoint2.addresses.length) { + return false; + } + for (const address1 of endpoint1.addresses) { + let matchFound = false; + for (const address2 of endpoint2.addresses) { + if (subchannelAddressEqual(address1, address2)) { + matchFound = true; + break; + } + } + if (!matchFound) { + return false; + } + } + return true; +} + +export class EndpointMap { + private map: Set> = new Set(); + + get size() { + return this.map.size; + } + + getForSubchannelAddress(address: SubchannelAddress): ValueType | undefined { + for (const entry of this.map) { + if (endpointHasAddress(entry.key, address)) { + return entry.value; + } + } + return undefined; + } + + /** + * Delete any entries in this map with keys that are not in endpoints + * @param endpoints + */ + deleteMissing(endpoints: Endpoint[]): ValueType[] { + const removedValues: ValueType[] = []; + for (const entry of this.map) { + let foundEntry = false; + for (const endpoint of endpoints) { + if (endpointEqualUnordered(endpoint, entry.key)) { + foundEntry = true; + } + } + if (!foundEntry) { + removedValues.push(entry.value); + this.map.delete(entry); + } + } + return removedValues; + } + + get(endpoint: Endpoint): ValueType | undefined { + for (const entry of this.map) { + if (endpointEqualUnordered(endpoint, entry.key)) { + return entry.value; + } + } + return undefined; + } + + set(endpoint: Endpoint, mapEntry: ValueType) { + for (const entry of this.map) { + if (endpointEqualUnordered(endpoint, entry.key)) { + entry.value = mapEntry; + return; + } + } + this.map.add({ key: endpoint, value: mapEntry }); + } + + delete(endpoint: Endpoint) { + for (const entry of this.map) { + if (endpointEqualUnordered(endpoint, entry.key)) { + this.map.delete(entry); + return; + } + } + } + + has(endpoint: Endpoint): boolean { + for (const entry of this.map) { + if (endpointEqualUnordered(endpoint, entry.key)) { + return true; + } + } + return false; + } + + clear() { + this.map.clear(); + } + + *keys(): IterableIterator { + for (const entry of this.map) { + yield entry.key; + } + } + + *values(): IterableIterator { + for (const entry of this.map) { + yield entry.value; + } + } + + *entries(): IterableIterator<[Endpoint, ValueType]> { + for (const entry of this.map) { + yield [entry.key, entry.value]; + } + } +} diff --git a/packages/grpc-js/src/subchannel-call.ts b/packages/grpc-js/src/subchannel-call.ts new file mode 100644 index 000000000..bee00119f --- /dev/null 
+++ b/packages/grpc-js/src/subchannel-call.ts @@ -0,0 +1,586 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import * as http2 from 'http2'; +import * as os from 'os'; + +import { DEFAULT_MAX_RECEIVE_MESSAGE_LENGTH, Status } from './constants'; +import { Metadata } from './metadata'; +import { StreamDecoder } from './stream-decoder'; +import * as logging from './logging'; +import { LogVerbosity } from './constants'; +import { + InterceptingListener, + MessageContext, + StatusObject, + WriteCallback, +} from './call-interface'; +import { CallEventTracker, Transport } from './transport'; + +const TRACER_NAME = 'subchannel_call'; + +/** + * https://nodejs.org/api/errors.html#errors_class_systemerror + */ +interface SystemError extends Error { + address?: string; + code: string; + dest?: string; + errno: number; + info?: object; + message: string; + path?: string; + port?: number; + syscall: string; +} + +/** + * Should do approximately the same thing as util.getSystemErrorName but the + * TypeScript types don't have that function for some reason so I just made my + * own. + * @param errno + */ +function getSystemErrorName(errno: number): string { + for (const [name, num] of Object.entries(os.constants.errno)) { + if (num === errno) { + return name; + } + } + return 'Unknown system error ' + errno; +} + +export interface SubchannelCall { + cancelWithStatus(status: Status, details: string): void; + getPeer(): string; + sendMessageWithContext(context: MessageContext, message: Buffer): void; + startRead(): void; + halfClose(): void; + getCallNumber(): number; + getDeadlineInfo(): string[]; +} + +export interface StatusObjectWithRstCode extends StatusObject { + rstCode?: number; +} + +export interface SubchannelCallInterceptingListener + extends InterceptingListener { + onReceiveStatus(status: StatusObjectWithRstCode): void; +} + +function mapHttpStatusCode(code: number): StatusObject { + const details = `Received HTTP status code ${code}`; + let mappedStatusCode: number; + switch (code) { + // TODO(murgatroid99): handle 100 and 101 + case 400: + mappedStatusCode = Status.INTERNAL; + break; + case 401: + mappedStatusCode = Status.UNAUTHENTICATED; + break; + case 403: + mappedStatusCode = Status.PERMISSION_DENIED; + break; + case 404: + mappedStatusCode = Status.UNIMPLEMENTED; + break; + case 429: + case 502: + case 503: + case 504: + mappedStatusCode = Status.UNAVAILABLE; + break; + default: + mappedStatusCode = Status.UNKNOWN; + } + return { + code: mappedStatusCode, + details: details, + metadata: new Metadata() + }; +} + +export class Http2SubchannelCall implements SubchannelCall { + private decoder: StreamDecoder; + + private isReadFilterPending = false; + private isPushPending = false; + private canPush = false; + /** + * Indicates that an 'end' event has come from the http2 stream, so there + * will be no more data events. 
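 *
 * (Illustrative note, not part of this patch: mapHttpStatusCode above follows
 * the usual HTTP-to-gRPC mapping, e.g.
 *   mapHttpStatusCode(401).code === Status.UNAUTHENTICATED
 *   mapHttpStatusCode(404).code === Status.UNIMPLEMENTED
 *   mapHttpStatusCode(503).code === Status.UNAVAILABLE
 * and any unrecognized code maps to Status.UNKNOWN.)
 *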
+ */ + private readsClosed = false; + + private statusOutput = false; + + private unpushedReadMessages: Buffer[] = []; + + private httpStatusCode: number | undefined; + + // This is populated (non-null) if and only if the call has ended + private finalStatus: StatusObject | null = null; + + private internalError: SystemError | null = null; + + private serverEndedCall = false; + + constructor( + private readonly http2Stream: http2.ClientHttp2Stream, + private readonly callEventTracker: CallEventTracker, + private readonly listener: SubchannelCallInterceptingListener, + private readonly transport: Transport, + private readonly callId: number + ) { + const maxReceiveMessageLength = transport.getOptions()['grpc.max_receive_message_length'] ?? DEFAULT_MAX_RECEIVE_MESSAGE_LENGTH; + this.decoder = new StreamDecoder(maxReceiveMessageLength); + http2Stream.on('response', (headers, flags) => { + let headersString = ''; + for (const header of Object.keys(headers)) { + headersString += '\t\t' + header + ': ' + headers[header] + '\n'; + } + this.trace('Received server headers:\n' + headersString); + this.httpStatusCode = headers[':status']; + + if (flags & http2.constants.NGHTTP2_FLAG_END_STREAM) { + this.handleTrailers(headers); + } else { + let metadata: Metadata; + try { + metadata = Metadata.fromHttp2Headers(headers); + } catch (error) { + this.endCall({ + code: Status.UNKNOWN, + details: (error as Error).message, + metadata: new Metadata(), + }); + return; + } + this.listener.onReceiveMetadata(metadata); + } + }); + http2Stream.on('trailers', (headers: http2.IncomingHttpHeaders) => { + this.handleTrailers(headers); + }); + http2Stream.on('data', (data: Buffer) => { + /* If the status has already been output, allow the http2 stream to + * drain without processing the data. */ + if (this.statusOutput) { + return; + } + this.trace('receive HTTP/2 data frame of length ' + data.length); + let messages: Buffer[]; + try { + messages = this.decoder.write(data); + } catch (e) { + this.cancelWithStatus(Status.RESOURCE_EXHAUSTED, (e as Error).message); + return; + } + + for (const message of messages) { + this.trace('parsed message of length ' + message.length); + this.callEventTracker!.addMessageReceived(); + this.tryPush(message); + } + }); + http2Stream.on('end', () => { + this.readsClosed = true; + this.maybeOutputStatus(); + }); + http2Stream.on('close', () => { + this.serverEndedCall = true; + /* Use process.next tick to ensure that this code happens after any + * "error" event that may be emitted at about the same time, so that + * we can bubble up the error message from that event. 
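 *
 * Illustrative sketch, not part of this patch: the StreamDecoder constructed
 * above enforces 'grpc.max_receive_message_length'. Assuming a client built
 * with, say,
 *
 *   const client = new EchoClient(target, credentials, {
 *     'grpc.max_receive_message_length': 4 * 1024 * 1024,
 *   });
 *
 * (EchoClient being a placeholder generated client), an oversized response is
 * reported as RESOURCE_EXHAUSTED with details like
 * "Received message larger than max (<size> vs <limit>)".
 *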
*/ + process.nextTick(() => { + this.trace('HTTP/2 stream closed with code ' + http2Stream.rstCode); + /* If we have a final status with an OK status code, that means that + * we have received all of the messages and we have processed the + * trailers and the call completed successfully, so it doesn't matter + * how the stream ends after that */ + if (this.finalStatus?.code === Status.OK) { + return; + } + let code: Status; + let details = ''; + switch (http2Stream.rstCode) { + case http2.constants.NGHTTP2_NO_ERROR: + /* If we get a NO_ERROR code and we already have a status, the + * stream completed properly and we just haven't fully processed + * it yet */ + if (this.finalStatus !== null) { + return; + } + if (this.httpStatusCode && this.httpStatusCode !== 200) { + const mappedStatus = mapHttpStatusCode(this.httpStatusCode); + code = mappedStatus.code; + details = mappedStatus.details; + } else { + code = Status.INTERNAL; + details = `Received RST_STREAM with code ${http2Stream.rstCode} (Call ended without gRPC status)`; + } + break; + case http2.constants.NGHTTP2_REFUSED_STREAM: + code = Status.UNAVAILABLE; + details = 'Stream refused by server'; + break; + case http2.constants.NGHTTP2_CANCEL: + code = Status.CANCELLED; + details = 'Call cancelled'; + break; + case http2.constants.NGHTTP2_ENHANCE_YOUR_CALM: + code = Status.RESOURCE_EXHAUSTED; + details = 'Bandwidth exhausted or memory limit exceeded'; + break; + case http2.constants.NGHTTP2_INADEQUATE_SECURITY: + code = Status.PERMISSION_DENIED; + details = 'Protocol not secure enough'; + break; + case http2.constants.NGHTTP2_INTERNAL_ERROR: + code = Status.INTERNAL; + if (this.internalError === null) { + /* This error code was previously handled in the default case, and + * there are several instances of it online, so I wanted to + * preserve the original error message so that people find existing + * information in searches, but also include the more recognizable + * "Internal server error" message. */ + details = `Received RST_STREAM with code ${http2Stream.rstCode} (Internal server error)`; + } else { + if ( + this.internalError.code === 'ECONNRESET' || + this.internalError.code === 'ETIMEDOUT' + ) { + code = Status.UNAVAILABLE; + details = this.internalError.message; + } else { + /* The "Received RST_STREAM with code ..." error is preserved + * here for continuity with errors reported online, but the + * error message at the end will probably be more relevant in + * most cases. */ + details = `Received RST_STREAM with code ${http2Stream.rstCode} triggered by internal client error: ${this.internalError.message}`; + } + } + break; + default: + code = Status.INTERNAL; + details = `Received RST_STREAM with code ${http2Stream.rstCode}`; + } + // This is a no-op if trailers were received at all. + // This is OK, because status codes emitted here correspond to more + // catastrophic issues that prevent us from receiving trailers in the + // first place. + this.endCall({ + code, + details, + metadata: new Metadata(), + rstCode: http2Stream.rstCode, + }); + }); + }); + http2Stream.on('error', (err: SystemError) => { + /* We need an error handler here to stop "Uncaught Error" exceptions + * from bubbling up. 
However, errors here should all correspond to + * "close" events, where we will handle the error more granularly */ + /* Specifically looking for stream errors that were *not* constructed + * from a RST_STREAM response here: + * https://github.com/nodejs/node/blob/8b8620d580314050175983402dfddf2674e8e22a/lib/internal/http2/core.js#L2267 + */ + if (err.code !== 'ERR_HTTP2_STREAM_ERROR') { + this.trace( + 'Node error event: message=' + + err.message + + ' code=' + + err.code + + ' errno=' + + getSystemErrorName(err.errno) + + ' syscall=' + + err.syscall + ); + this.internalError = err; + } + this.callEventTracker.onStreamEnd(false); + }); + } + getDeadlineInfo(): string[] { + return [`remote_addr=${this.getPeer()}`]; + } + + public onDisconnect() { + this.endCall({ + code: Status.UNAVAILABLE, + details: 'Connection dropped', + metadata: new Metadata(), + }); + } + + private outputStatus() { + /* Precondition: this.finalStatus !== null */ + if (!this.statusOutput) { + this.statusOutput = true; + this.trace( + 'ended with status: code=' + + this.finalStatus!.code + + ' details="' + + this.finalStatus!.details + + '"' + ); + this.callEventTracker.onCallEnd(this.finalStatus!); + /* We delay the actual action of bubbling up the status to insulate the + * cleanup code in this class from any errors that may be thrown in the + * upper layers as a result of bubbling up the status. In particular, + * if the status is not OK, the "error" event may be emitted + * synchronously at the top level, which will result in a thrown error if + * the user does not handle that event. */ + process.nextTick(() => { + this.listener.onReceiveStatus(this.finalStatus!); + }); + /* Leave the http2 stream in flowing state to drain incoming messages, to + * ensure that the stream closure completes. The call stream already does + * not push more messages after the status is output, so the messages go + * nowhere either way. */ + this.http2Stream.resume(); + } + } + + private trace(text: string): void { + logging.trace( + LogVerbosity.DEBUG, + TRACER_NAME, + '[' + this.callId + '] ' + text + ); + } + + /** + * On first call, emits a 'status' event with the given StatusObject. + * Subsequent calls are no-ops. + * @param status The status of the call. + */ + private endCall(status: StatusObjectWithRstCode): void { + /* If the status is OK and a new status comes in (e.g. from a + * deserialization failure), that new status takes priority */ + if (this.finalStatus === null || this.finalStatus.code === Status.OK) { + this.finalStatus = status; + this.maybeOutputStatus(); + } + this.destroyHttp2Stream(); + } + + private maybeOutputStatus() { + if (this.finalStatus !== null) { + /* The combination check of readsClosed and that the two message buffer + * arrays are empty checks that there all incoming data has been fully + * processed */ + if ( + this.finalStatus.code !== Status.OK || + (this.readsClosed && + this.unpushedReadMessages.length === 0 && + !this.isReadFilterPending && + !this.isPushPending) + ) { + this.outputStatus(); + } + } + } + + private push(message: Buffer): void { + this.trace( + 'pushing to reader message of length ' + + (message instanceof Buffer ? message.length : null) + ); + this.canPush = false; + this.isPushPending = true; + process.nextTick(() => { + this.isPushPending = false; + /* If we have already output the status any later messages should be + * ignored, and can cause out-of-order operation errors higher up in the + * stack. Checking as late as possible here to avoid any race conditions. 
+ */ + if (this.statusOutput) { + return; + } + this.listener.onReceiveMessage(message); + this.maybeOutputStatus(); + }); + } + + private tryPush(messageBytes: Buffer): void { + if (this.canPush) { + this.http2Stream!.pause(); + this.push(messageBytes); + } else { + this.trace( + 'unpushedReadMessages.push message of length ' + messageBytes.length + ); + this.unpushedReadMessages.push(messageBytes); + } + } + + private handleTrailers(headers: http2.IncomingHttpHeaders) { + this.serverEndedCall = true; + this.callEventTracker.onStreamEnd(true); + let headersString = ''; + for (const header of Object.keys(headers)) { + headersString += '\t\t' + header + ': ' + headers[header] + '\n'; + } + this.trace('Received server trailers:\n' + headersString); + let metadata: Metadata; + try { + metadata = Metadata.fromHttp2Headers(headers); + } catch (e) { + metadata = new Metadata(); + } + const metadataMap = metadata.getMap(); + let status: StatusObject; + if (typeof metadataMap['grpc-status'] === 'string') { + const receivedStatus: Status = Number(metadataMap['grpc-status']); + this.trace('received status code ' + receivedStatus + ' from server'); + metadata.remove('grpc-status'); + let details = ''; + if (typeof metadataMap['grpc-message'] === 'string') { + try { + details = decodeURI(metadataMap['grpc-message']); + } catch (e) { + details = metadataMap['grpc-message']; + } + metadata.remove('grpc-message'); + this.trace( + 'received status details string "' + details + '" from server' + ); + } + status = { + code: receivedStatus, + details: details, + metadata: metadata + }; + } else if (this.httpStatusCode) { + status = mapHttpStatusCode(this.httpStatusCode); + status.metadata = metadata; + } else { + status = { + code: Status.UNKNOWN, + details: 'No status information received', + metadata: metadata + }; + } + // This is a no-op if the call was already ended when handling headers. + this.endCall(status); + } + + private destroyHttp2Stream() { + // The http2 stream could already have been destroyed if cancelWithStatus + // is called in response to an internal http2 error. + if (this.http2Stream.destroyed) { + return; + } + /* If the server ended the call, sending an RST_STREAM is redundant, so we + * just half close on the client side instead to finish closing the stream. 
+ */ + if (this.serverEndedCall) { + this.http2Stream.end(); + } else { + /* If the call has ended with an OK status, communicate that when closing + * the stream, partly to avoid a situation in which we detect an error + * RST_STREAM as a result after we have the status */ + let code: number; + if (this.finalStatus?.code === Status.OK) { + code = http2.constants.NGHTTP2_NO_ERROR; + } else { + code = http2.constants.NGHTTP2_CANCEL; + } + this.trace('close http2 stream with code ' + code); + this.http2Stream.close(code); + } + } + + cancelWithStatus(status: Status, details: string): void { + this.trace( + 'cancelWithStatus code: ' + status + ' details: "' + details + '"' + ); + this.endCall({ code: status, details, metadata: new Metadata() }); + } + + getStatus(): StatusObject | null { + return this.finalStatus; + } + + getPeer(): string { + return this.transport.getPeerName(); + } + + getCallNumber(): number { + return this.callId; + } + + startRead() { + /* If the stream has ended with an error, we should not emit any more + * messages and we should communicate that the stream has ended */ + if (this.finalStatus !== null && this.finalStatus.code !== Status.OK) { + this.readsClosed = true; + this.maybeOutputStatus(); + return; + } + this.canPush = true; + if (this.unpushedReadMessages.length > 0) { + const nextMessage: Buffer = this.unpushedReadMessages.shift()!; + this.push(nextMessage); + return; + } + /* Only resume reading from the http2Stream if we don't have any pending + * messages to emit */ + this.http2Stream.resume(); + } + + sendMessageWithContext(context: MessageContext, message: Buffer) { + this.trace('write() called with message of length ' + message.length); + const cb: WriteCallback = (error?: Error | null) => { + /* nextTick here ensures that no stream action can be taken in the call + * stack of the write callback, in order to hopefully work around + * https://github.com/nodejs/node/issues/49147 */ + process.nextTick(() => { + let code: Status = Status.UNAVAILABLE; + if ( + (error as NodeJS.ErrnoException)?.code === + 'ERR_STREAM_WRITE_AFTER_END' + ) { + code = Status.INTERNAL; + } + if (error) { + this.cancelWithStatus(code, `Write error: ${error.message}`); + } + context.callback?.(); + }); + }; + this.trace('sending data chunk of length ' + message.length); + this.callEventTracker.addMessageSent(); + try { + this.http2Stream!.write(message, cb); + } catch (error) { + this.endCall({ + code: Status.UNAVAILABLE, + details: `Write failed with error ${(error as Error).message}`, + metadata: new Metadata(), + }); + } + } + + halfClose() { + this.trace('end() called'); + this.trace('calling end() on HTTP/2 stream'); + this.http2Stream.end(); + } +} diff --git a/packages/grpc-js/src/subchannel-interface.ts b/packages/grpc-js/src/subchannel-interface.ts new file mode 100644 index 000000000..6c314189a --- /dev/null +++ b/packages/grpc-js/src/subchannel-interface.ts @@ -0,0 +1,137 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +import type { SubchannelRef } from './channelz'; +import { ConnectivityState } from './connectivity-state'; +import { Subchannel } from './subchannel'; + +export type ConnectivityStateListener = ( + subchannel: SubchannelInterface, + previousState: ConnectivityState, + newState: ConnectivityState, + keepaliveTime: number, + errorMessage?: string +) => void; + +export type HealthListener = (healthy: boolean) => void; + +/** + * This is an interface for load balancing policies to use to interact with + * subchannels. This allows load balancing policies to wrap and unwrap + * subchannels. + * + * Any load balancing policy that wraps subchannels must unwrap the subchannel + * in the picker, so that other load balancing policies consistently have + * access to their own wrapper objects. + */ +export interface SubchannelInterface { + getConnectivityState(): ConnectivityState; + addConnectivityStateListener(listener: ConnectivityStateListener): void; + removeConnectivityStateListener(listener: ConnectivityStateListener): void; + startConnecting(): void; + getAddress(): string; + throttleKeepalive(newKeepaliveTime: number): void; + ref(): void; + unref(): void; + getChannelzRef(): SubchannelRef; + isHealthy(): boolean; + addHealthStateWatcher(listener: HealthListener): void; + removeHealthStateWatcher(listener: HealthListener): void; + /** + * If this is a wrapper, return the wrapped subchannel, otherwise return this + */ + getRealSubchannel(): Subchannel; + /** + * Returns true if this and other both proxy the same underlying subchannel. + * Can be used instead of directly accessing getRealSubchannel to allow mocks + * to avoid implementing getRealSubchannel + */ + realSubchannelEquals(other: SubchannelInterface): boolean; +} + +export abstract class BaseSubchannelWrapper implements SubchannelInterface { + private healthy = true; + private healthListeners: Set = new Set(); + constructor(protected child: SubchannelInterface) { + child.addHealthStateWatcher(childHealthy => { + /* A change to the child health state only affects this wrapper's overall + * health state if this wrapper is reporting healthy. 
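 *
 * Illustrative sketch, not part of this patch: a load balancing policy that
 * wants to mark addresses unhealthy can subclass this wrapper and flip the
 * local health bit (OutlierWrapper is a hypothetical name):
 *
 *   class OutlierWrapper extends BaseSubchannelWrapper {
 *     eject(): void { this.setHealthy(false); }
 *     uneject(): void { this.setHealthy(true); }
 *   }
 *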
*/ + if (this.healthy) { + this.updateHealthListeners(); + } + }); + } + + private updateHealthListeners(): void { + for (const listener of this.healthListeners) { + listener(this.isHealthy()); + } + } + + getConnectivityState(): ConnectivityState { + return this.child.getConnectivityState(); + } + addConnectivityStateListener(listener: ConnectivityStateListener): void { + this.child.addConnectivityStateListener(listener); + } + removeConnectivityStateListener(listener: ConnectivityStateListener): void { + this.child.removeConnectivityStateListener(listener); + } + startConnecting(): void { + this.child.startConnecting(); + } + getAddress(): string { + return this.child.getAddress(); + } + throttleKeepalive(newKeepaliveTime: number): void { + this.child.throttleKeepalive(newKeepaliveTime); + } + ref(): void { + this.child.ref(); + } + unref(): void { + this.child.unref(); + } + getChannelzRef(): SubchannelRef { + return this.child.getChannelzRef(); + } + isHealthy(): boolean { + return this.healthy && this.child.isHealthy(); + } + addHealthStateWatcher(listener: HealthListener): void { + this.healthListeners.add(listener); + } + removeHealthStateWatcher(listener: HealthListener): void { + this.healthListeners.delete(listener); + } + protected setHealthy(healthy: boolean): void { + if (healthy !== this.healthy) { + this.healthy = healthy; + /* A change to this wrapper's health state only affects the overall + * reported health state if the child is healthy. */ + if (this.child.isHealthy()) { + this.updateHealthListeners(); + } + } + } + getRealSubchannel(): Subchannel { + return this.child.getRealSubchannel(); + } + realSubchannelEquals(other: SubchannelInterface): boolean { + return this.getRealSubchannel() === other.getRealSubchannel(); + } +} diff --git a/packages/grpc-js/src/subchannel-pool.ts b/packages/grpc-js/src/subchannel-pool.ts index d28e3eac8..a5dec729d 100644 --- a/packages/grpc-js/src/subchannel-pool.ts +++ b/packages/grpc-js/src/subchannel-pool.ts @@ -16,13 +16,14 @@ */ import { ChannelOptions, channelOptionsEqual } from './channel-options'; +import { Subchannel } from './subchannel'; import { - Subchannel, SubchannelAddress, subchannelAddressEqual, -} from './subchannel'; +} from './subchannel-address'; import { ChannelCredentials } from './channel-credentials'; import { GrpcUri, uriToString } from './uri-parser'; +import { Http2SubchannelConnector } from './transport'; // 10 seconds in milliseconds. This value is arbitrary. /** @@ -44,15 +45,13 @@ export class SubchannelPool { /** * A timer of a task performing a periodic subchannel cleanup. */ - private cleanupTimer: NodeJS.Timer | null = null; + private cleanupTimer: NodeJS.Timeout | null = null; /** * A pool of subchannels use for making connections. Subchannels with the * exact same parameters will be reused. - * @param global If true, this is the global subchannel pool. Otherwise, it - * is the pool for a single channel. */ - constructor(private global: boolean) {} + constructor() {} /** * Unrefs all unused subchannels and cancels the cleanup task if all @@ -69,7 +68,7 @@ export class SubchannelPool { const subchannelObjArray = this.pool[channelTarget]; const refedSubchannels = subchannelObjArray.filter( - (value) => !value.subchannel.unrefIfOneRef() + value => !value.subchannel.unrefIfOneRef() ); if (refedSubchannels.length > 0) { @@ -95,7 +94,7 @@ export class SubchannelPool { * Ensures that the cleanup task is spawned. 
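// Illustrative sketch (not part of this change): a minimal concrete wrapper
// built on BaseSubchannelWrapper, roughly the shape a health-reporting policy
// such as outlier detection could use. The class name and the quarantine/
// release methods are hypothetical.
import {
  BaseSubchannelWrapper,
  SubchannelInterface,
} from './subchannel-interface';

class QuarantiningSubchannelWrapper extends BaseSubchannelWrapper {
  constructor(child: SubchannelInterface) {
    super(child);
  }
  // setHealthy is protected on the base class; it only notifies health
  // listeners when the child itself currently reports healthy.
  quarantine(): void {
    this.setHealthy(false);
  }
  release(): void {
    this.setHealthy(true);
  }
}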
*/ ensureCleanupTask(): void { - if (this.global && this.cleanupTimer === null) { + if (this.cleanupTimer === null) { this.cleanupTimer = setInterval(() => { this.unrefUnusedSubchannels(); }, REF_CHECK_INTERVAL); @@ -145,7 +144,8 @@ export class SubchannelPool { channelTargetUri, subchannelTarget, channelArguments, - channelCredentials + channelCredentials, + new Http2SubchannelConnector(channelTargetUri) ); if (!(channelTarget in this.pool)) { this.pool[channelTarget] = []; @@ -156,14 +156,12 @@ export class SubchannelPool { channelCredentials, subchannel, }); - if (this.global) { - subchannel.ref(); - } + subchannel.ref(); return subchannel; } } -const globalSubchannelPool = new SubchannelPool(true); +const globalSubchannelPool = new SubchannelPool(); /** * Get either the global subchannel pool, or a new subchannel pool. @@ -173,6 +171,6 @@ export function getSubchannelPool(global: boolean): SubchannelPool { if (global) { return globalSubchannelPool; } else { - return new SubchannelPool(false); + return new SubchannelPool(); } } diff --git a/packages/grpc-js/src/subchannel.ts b/packages/grpc-js/src/subchannel.ts index cb1650dc1..95b600c4c 100644 --- a/packages/grpc-js/src/subchannel.ts +++ b/packages/grpc-js/src/subchannel.ts @@ -15,118 +15,44 @@ * */ -import * as http2 from 'http2'; import { ChannelCredentials } from './channel-credentials'; import { Metadata } from './metadata'; -import { Http2CallStream } from './call-stream'; import { ChannelOptions } from './channel-options'; -import { PeerCertificate, checkServerIdentity } from 'tls'; -import { ConnectivityState } from './channel'; +import { ConnectivityState } from './connectivity-state'; import { BackoffTimeout, BackoffOptions } from './backoff-timeout'; -import { getDefaultAuthority } from './resolver'; import * as logging from './logging'; -import { LogVerbosity } from './constants'; -import { getProxiedConnection, ProxyConnectionResult } from './http_proxy'; -import * as net from 'net'; -import { GrpcUri, parseUri, splitHostPort, uriToString } from './uri-parser'; -import { ConnectionOptions } from 'tls'; -import { FilterFactory, Filter } from './filter'; - -const clientVersion = require('../../package.json').version; +import { LogVerbosity, Status } from './constants'; +import { GrpcUri, uriToString } from './uri-parser'; +import { + SubchannelAddress, + subchannelAddressToString, +} from './subchannel-address'; +import { + SubchannelRef, + ChannelzTrace, + ChannelzChildrenTracker, + ChannelzChildrenTrackerStub, + SubchannelInfo, + registerChannelzSubchannel, + ChannelzCallTracker, + ChannelzCallTrackerStub, + unregisterChannelzRef, + ChannelzTraceStub, +} from './channelz'; +import { + ConnectivityStateListener, + SubchannelInterface, +} from './subchannel-interface'; +import { SubchannelCallInterceptingListener } from './subchannel-call'; +import { SubchannelCall } from './subchannel-call'; +import { CallEventTracker, SubchannelConnector, Transport } from './transport'; const TRACER_NAME = 'subchannel'; -function trace(text: string): void { - logging.trace(LogVerbosity.DEBUG, TRACER_NAME, text); -} - -function refTrace(text: string): void { - logging.trace(LogVerbosity.DEBUG, 'subchannel_refcount', text); -} - -const MIN_CONNECT_TIMEOUT_MS = 20000; -const INITIAL_BACKOFF_MS = 1000; -const BACKOFF_MULTIPLIER = 1.6; -const MAX_BACKOFF_MS = 120000; -const BACKOFF_JITTER = 0.2; - /* setInterval and setTimeout only accept signed 32 bit integers. 
JS doesn't * have a constant for the max signed 32 bit integer, so this is a simple way * to calculate it */ const KEEPALIVE_MAX_TIME_MS = ~(1 << 31); -const KEEPALIVE_TIMEOUT_MS = 20000; - -export type ConnectivityStateListener = ( - subchannel: Subchannel, - previousState: ConnectivityState, - newState: ConnectivityState -) => void; - -const { - HTTP2_HEADER_AUTHORITY, - HTTP2_HEADER_CONTENT_TYPE, - HTTP2_HEADER_METHOD, - HTTP2_HEADER_PATH, - HTTP2_HEADER_TE, - HTTP2_HEADER_USER_AGENT, -} = http2.constants; - -/** - * Get a number uniformly at random in the range [min, max) - * @param min - * @param max - */ -function uniformRandom(min: number, max: number) { - return Math.random() * (max - min) + min; -} - -const tooManyPingsData: Buffer = Buffer.from('too_many_pings', 'ascii'); - -export interface TcpSubchannelAddress { - port: number; - host: string; -} - -export interface IpcSubchannelAddress { - path: string; -} - -/** - * This represents a single backend address to connect to. This interface is a - * subset of net.SocketConnectOpts, i.e. the options described at - * https://nodejs.org/api/net.html#net_socket_connect_options_connectlistener. - * Those are in turn a subset of the options that can be passed to http2.connect. - */ -export type SubchannelAddress = TcpSubchannelAddress | IpcSubchannelAddress; - -export function isTcpSubchannelAddress( - address: SubchannelAddress -): address is TcpSubchannelAddress { - return 'port' in address; -} - -export function subchannelAddressEqual( - address1: SubchannelAddress, - address2: SubchannelAddress -): boolean { - if (isTcpSubchannelAddress(address1)) { - return ( - isTcpSubchannelAddress(address2) && - address1.host === address2.host && - address1.port === address2.port - ); - } else { - return !isTcpSubchannelAddress(address2) && address1.path === address2.path; - } -} - -export function subchannelAddressToString(address: SubchannelAddress): string { - if (isTcpSubchannelAddress(address)) { - return address.host + ':' + address.port; - } else { - return address.path; - } -} export class Subchannel { /** @@ -137,7 +63,7 @@ export class Subchannel { /** * The underlying http2 session used to make requests. */ - private session: http2.ClientHttp2Session | null = null; + private transport: Transport | null = null; /** * Indicates that the subchannel should transition from TRANSIENT_FAILURE to * CONNECTING instead of IDLE when the backoff timeout ends. @@ -148,47 +74,11 @@ export class Subchannel { * state changes. Will be modified by `addConnectivityStateListener` and * `removeConnectivityStateListener` */ - private stateListeners: ConnectivityStateListener[] = []; - - /** - * A list of listener functions that will be called when the underlying - * socket disconnects. Used for ending active calls with an UNAVAILABLE - * status. - */ - private disconnectListeners: Array<() => void> = []; + private stateListeners: Set = new Set(); private backoffTimeout: BackoffTimeout; - /** - * The complete user agent string constructed using channel args. 
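// Illustrative sketch (not part of this change): the address helpers removed
// here now come from './subchannel-address', per the new imports above. Based
// on the removed implementation, the two address shapes stringify as shown;
// the example values are hypothetical.
import {
  SubchannelAddress,
  subchannelAddressToString,
} from './subchannel-address';

const tcpAddress: SubchannelAddress = { host: '10.0.0.1', port: 443 };
const udsAddress: SubchannelAddress = { path: '/tmp/grpc.sock' };
console.log(subchannelAddressToString(tcpAddress)); // '10.0.0.1:443'
console.log(subchannelAddressToString(udsAddress)); // '/tmp/grpc.sock'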
- */ - private userAgent: string; - - /** - * The amount of time in between sending pings - */ - private keepaliveTimeMs: number = KEEPALIVE_MAX_TIME_MS; - /** - * The amount of time to wait for an acknowledgement after sending a ping - */ - private keepaliveTimeoutMs: number = KEEPALIVE_TIMEOUT_MS; - /** - * Timer reference for timeout that indicates when to send the next ping - */ - private keepaliveIntervalId: NodeJS.Timer; - /** - * Timer reference tracking when the most recent ping will be considered lost - */ - private keepaliveTimeoutId: NodeJS.Timer; - /** - * Indicates whether keepalive pings should be sent without any active calls - */ - private keepaliveWithoutCalls: boolean = false; - - /** - * Tracks calls with references to this subchannel - */ - private callRefcount = 0; + private keepaliveTime: number; /** * Tracks channels and subchannel pools with references to this subchannel */ @@ -199,6 +89,19 @@ export class Subchannel { */ private subchannelAddressString: string; + // Channelz info + private readonly channelzEnabled: boolean = true; + private channelzRef: SubchannelRef; + + private channelzTrace: ChannelzTrace | ChannelzTraceStub; + private callTracker: ChannelzCallTracker | ChannelzCallTrackerStub; + private childrenTracker: + | ChannelzChildrenTracker + | ChannelzChildrenTrackerStub; + + // Channelz socket info + private streamTracker: ChannelzCallTracker | ChannelzCallTrackerStub; + /** * A class representing a connection to a single backend. * @param channelTarget The target string for the channel as a whole @@ -213,32 +116,9 @@ export class Subchannel { private channelTarget: GrpcUri, private subchannelAddress: SubchannelAddress, private options: ChannelOptions, - private credentials: ChannelCredentials + private credentials: ChannelCredentials, + private connector: SubchannelConnector ) { - // Build user-agent string. - this.userAgent = [ - options['grpc.primary_user_agent'], - `grpc-node-js/${clientVersion}`, - options['grpc.secondary_user_agent'], - ] - .filter((e) => e) - .join(' '); // remove falsey values first - - if ('grpc.keepalive_time_ms' in options) { - this.keepaliveTimeMs = options['grpc.keepalive_time_ms']!; - } - if ('grpc.keepalive_timeout_ms' in options) { - this.keepaliveTimeoutMs = options['grpc.keepalive_timeout_ms']!; - } - if ('grpc.keepalive_permit_without_calls' in options) { - this.keepaliveWithoutCalls = options['grpc.keepalive_permit_without_calls'] === 1; - } else { - this.keepaliveWithoutCalls = false; - } - this.keepaliveIntervalId = setTimeout(() => {}, 0); - clearTimeout(this.keepaliveIntervalId); - this.keepaliveTimeoutId = setTimeout(() => {}, 0); - clearTimeout(this.keepaliveTimeoutId); const backoffOptions: BackoffOptions = { initialDelay: options['grpc.initial_reconnect_backoff_ms'], maxDelay: options['grpc.max_reconnect_backoff_ms'], @@ -246,7 +126,71 @@ export class Subchannel { this.backoffTimeout = new BackoffTimeout(() => { this.handleBackoffTimer(); }, backoffOptions); + this.backoffTimeout.unref(); this.subchannelAddressString = subchannelAddressToString(subchannelAddress); + + this.keepaliveTime = options['grpc.keepalive_time_ms'] ?? 
-1; + + if (options['grpc.enable_channelz'] === 0) { + this.channelzEnabled = false; + this.channelzTrace = new ChannelzTraceStub(); + this.callTracker = new ChannelzCallTrackerStub(); + this.childrenTracker = new ChannelzChildrenTrackerStub(); + this.streamTracker = new ChannelzCallTrackerStub(); + } else { + this.channelzTrace = new ChannelzTrace(); + this.callTracker = new ChannelzCallTracker(); + this.childrenTracker = new ChannelzChildrenTracker(); + this.streamTracker = new ChannelzCallTracker(); + } + + this.channelzRef = registerChannelzSubchannel( + this.subchannelAddressString, + () => this.getChannelzInfo(), + this.channelzEnabled + ); + + this.channelzTrace.addTrace('CT_INFO', 'Subchannel created'); + this.trace( + 'Subchannel constructed with options ' + + JSON.stringify(options, undefined, 2) + ); + } + + private getChannelzInfo(): SubchannelInfo { + return { + state: this.connectivityState, + trace: this.channelzTrace, + callTracker: this.callTracker, + children: this.childrenTracker.getChildLists(), + target: this.subchannelAddressString, + }; + } + + private trace(text: string): void { + logging.trace( + LogVerbosity.DEBUG, + TRACER_NAME, + '(' + + this.channelzRef.id + + ') ' + + this.subchannelAddressString + + ' ' + + text + ); + } + + private refTrace(text: string): void { + logging.trace( + LogVerbosity.DEBUG, + 'subchannel_refcount', + '(' + + this.channelzRef.id + + ') ' + + this.subchannelAddressString + + ' ' + + text + ); } private handleBackoffTimer() { @@ -275,251 +219,60 @@ export class Subchannel { this.backoffTimeout.reset(); } - private sendPing() { - logging.trace(LogVerbosity.DEBUG, 'keepalive', 'Sending ping to ' + this.subchannelAddressString); - this.keepaliveTimeoutId = setTimeout(() => { - this.transitionToState([ConnectivityState.READY], ConnectivityState.IDLE); - }, this.keepaliveTimeoutMs); - this.keepaliveTimeoutId.unref?.(); - this.session!.ping( - (err: Error | null, duration: number, payload: Buffer) => { - clearTimeout(this.keepaliveTimeoutId); - } - ); - } - - private startKeepalivePings() { - this.keepaliveIntervalId = setInterval(() => { - this.sendPing(); - }, this.keepaliveTimeMs); - this.keepaliveIntervalId.unref?.() - /* Don't send a ping immediately because whatever caused us to start - * sending pings should also involve some network activity. */ - } - - private stopKeepalivePings() { - clearInterval(this.keepaliveIntervalId); - clearTimeout(this.keepaliveTimeoutId); - } - - private createSession(proxyConnectionResult: ProxyConnectionResult) { - if (proxyConnectionResult.realTarget) { - trace(this.subchannelAddressString + ' creating HTTP/2 session through proxy to ' + proxyConnectionResult.realTarget); - } else { - trace(this.subchannelAddressString + ' creating HTTP/2 session'); - } - const targetAuthority = getDefaultAuthority( - proxyConnectionResult.realTarget ?? this.channelTarget - ); - let connectionOptions: http2.SecureClientSessionOptions = - this.credentials._getConnectionOptions() || {}; - connectionOptions.maxSendHeaderBlockLength = Number.MAX_SAFE_INTEGER; - if ('grpc-node.max_session_memory' in this.options) { - connectionOptions.maxSessionMemory = this.options['grpc-node.max_session_memory']; - } - let addressScheme = 'http://'; - if ('secureContext' in connectionOptions) { - addressScheme = 'https://'; - // If provided, the value of grpc.ssl_target_name_override should be used - // to override the target hostname when checking server identity. - // This option is used for testing only. 
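// Illustrative sketch (not part of this change): the channel options this
// constructor consults. Disabling channelz swaps the trace and call trackers
// for stub implementations, and the backoff options feed BackoffTimeout; the
// values shown here are hypothetical.
import { ChannelOptions } from './channel-options';

const subchannelOptions: ChannelOptions = {
  'grpc.enable_channelz': 0,                 // use the Channelz*Stub trackers
  'grpc.keepalive_time_ms': 60000,           // seeds keepaliveTime (default -1, i.e. disabled)
  'grpc.initial_reconnect_backoff_ms': 1000, // BackoffTimeout initialDelay
  'grpc.max_reconnect_backoff_ms': 120000,   // BackoffTimeout maxDelay
};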
- if (this.options['grpc.ssl_target_name_override']) { - const sslTargetNameOverride = this.options[ - 'grpc.ssl_target_name_override' - ]!; - connectionOptions.checkServerIdentity = ( - host: string, - cert: PeerCertificate - ): Error | undefined => { - return checkServerIdentity(sslTargetNameOverride, cert); - }; - connectionOptions.servername = sslTargetNameOverride; - } else { - const authorityHostname = - splitHostPort(targetAuthority)?.host ?? 'localhost'; - // We want to always set servername to support SNI - connectionOptions.servername = authorityHostname; - } - if (proxyConnectionResult.socket) { - /* This is part of the workaround for - * https://github.com/nodejs/node/issues/32922. Without that bug, - * proxyConnectionResult.socket would always be a plaintext socket and - * this would say - * connectionOptions.socket = proxyConnectionResult.socket; */ - connectionOptions.createConnection = (authority, option) => { - return proxyConnectionResult.socket!; - }; - } - } else { - /* In all but the most recent versions of Node, http2.connect does not use - * the options when establishing plaintext connections, so we need to - * establish that connection explicitly. */ - connectionOptions.createConnection = (authority, option) => { - if (proxyConnectionResult.socket) { - return proxyConnectionResult.socket; - } else { - /* net.NetConnectOpts is declared in a way that is more restrictive - * than what net.connect will actually accept, so we use the type - * assertion to work around that. */ - return net.connect(this.subchannelAddress); - } - }; + private startConnectingInternal() { + let options = this.options; + if (options['grpc.keepalive_time_ms']) { + const adjustedKeepaliveTime = Math.min( + this.keepaliveTime, + KEEPALIVE_MAX_TIME_MS + ); + options = { ...options, 'grpc.keepalive_time_ms': adjustedKeepaliveTime }; } - - connectionOptions = { - ...connectionOptions, - ...this.subchannelAddress, - }; - - /* http2.connect uses the options here: - * https://github.com/nodejs/node/blob/70c32a6d190e2b5d7b9ff9d5b6a459d14e8b7d59/lib/internal/http2/core.js#L3028-L3036 - * The spread operator overides earlier values with later ones, so any port - * or host values in the options will be used rather than any values extracted - * from the first argument. In addition, the path overrides the host and port, - * as documented for plaintext connections here: - * https://nodejs.org/api/net.html#net_socket_connect_options_connectlistener - * and for TLS connections here: - * https://nodejs.org/api/tls.html#tls_tls_connect_options_callback. In - * earlier versions of Node, http2.connect passes these options to - * tls.connect but not net.connect, so in the insecure case we still need - * to set the createConnection option above to create the connection - * explicitly. We cannot do that in the TLS case because http2.connect - * passes necessary additional options to tls.connect. - * The first argument just needs to be parseable as a URL and the scheme - * determines whether the connection will be established over TLS or not. - */ - const session = http2.connect( - addressScheme + targetAuthority, - connectionOptions - ); - this.session = session; - session.unref(); - /* For all of these events, check if the session at the time of the event - * is the same one currently attached to this subchannel, to ensure that - * old events from previous connection attempts cannot cause invalid state - * transitions. 
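// Illustrative sketch (not part of this change): the SubchannelConnector
// contract the subchannel relies on above. A test double like this one (the
// class name is hypothetical) could be injected in place of
// Http2SubchannelConnector; every connection attempt fails, which the code
// above turns into a TRANSIENT_FAILURE transition carrying the error message.
import { SubchannelConnector, Transport } from './transport';

class FailingConnector implements SubchannelConnector {
  connect(): Promise<Transport> {
    return Promise.reject(new Error('connection refused (simulated)'));
  }
  shutdown(): void {}
}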
*/ - session.once('connect', () => { - if (this.session === session) { - this.transitionToState( - [ConnectivityState.CONNECTING], - ConnectivityState.READY - ); - } - }); - session.once('close', () => { - if (this.session === session) { - trace(this.subchannelAddressString + ' connection closed'); - this.transitionToState( - [ConnectivityState.CONNECTING], - ConnectivityState.TRANSIENT_FAILURE - ); - /* Transitioning directly to IDLE here should be OK because we are not - * doing any backoff, because a connection was established at some - * point */ - this.transitionToState( - [ConnectivityState.READY], - ConnectivityState.IDLE - ); - } - }); - session.once( - 'goaway', - (errorCode: number, lastStreamID: number, opaqueData: Buffer) => { - if (this.session === session) { - /* See the last paragraph of - * https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md#basic-keepalive */ + this.connector + .connect(this.subchannelAddress, this.credentials, options) + .then( + transport => { if ( - errorCode === http2.constants.NGHTTP2_ENHANCE_YOUR_CALM && - opaqueData.equals(tooManyPingsData) + this.transitionToState( + [ConnectivityState.CONNECTING], + ConnectivityState.READY + ) ) { - this.keepaliveTimeMs = Math.min( - 2 * this.keepaliveTimeMs, - KEEPALIVE_MAX_TIME_MS - ); - logging.log( - LogVerbosity.ERROR, - `Connection to ${uriToString(this.channelTarget)} at ${this.subchannelAddressString} rejected by server because of excess pings. Increasing ping interval to ${this.keepaliveTimeMs} ms` - ); + this.transport = transport; + if (this.channelzEnabled) { + this.childrenTracker.refChild(transport.getChannelzRef()); + } + transport.addDisconnectListener(tooManyPings => { + this.transitionToState( + [ConnectivityState.READY], + ConnectivityState.IDLE + ); + if (tooManyPings && this.keepaliveTime > 0) { + this.keepaliveTime *= 2; + logging.log( + LogVerbosity.ERROR, + `Connection to ${uriToString(this.channelTarget)} at ${ + this.subchannelAddressString + } rejected by server because of excess pings. Increasing ping interval to ${ + this.keepaliveTime + } ms` + ); + } + }); + } else { + /* If we can't transition from CONNECTING to READY here, we will + * not be using this transport, so release its resources. */ + transport.shutdown(); } - trace( - this.subchannelAddressString + - ' connection closed by GOAWAY with code ' + - errorCode - ); + }, + error => { this.transitionToState( - [ConnectivityState.CONNECTING, ConnectivityState.READY], - ConnectivityState.IDLE + [ConnectivityState.CONNECTING], + ConnectivityState.TRANSIENT_FAILURE, + `${error}` ); } - } - ); - session.once('error', (error) => { - /* Do nothing here. Any error should also trigger a close event, which is - * where we want to handle that. */ - trace( - this.subchannelAddressString + - ' connection closed with error ' + - (error as Error).message ); - }); - } - - private startConnectingInternal() { - /* Pass connection options through to the proxy so that it's able to - * upgrade it's connection to support tls if needed. - * This is a workaround for https://github.com/nodejs/node/issues/32922 - * See https://github.com/grpc/grpc-node/pull/1369 for more info. */ - const connectionOptions: ConnectionOptions = - this.credentials._getConnectionOptions() || {}; - - if ('secureContext' in connectionOptions) { - connectionOptions.ALPNProtocols = ['h2']; - // If provided, the value of grpc.ssl_target_name_override should be used - // to override the target hostname when checking server identity. 
- // This option is used for testing only. - if (this.options['grpc.ssl_target_name_override']) { - const sslTargetNameOverride = this.options[ - 'grpc.ssl_target_name_override' - ]!; - connectionOptions.checkServerIdentity = ( - host: string, - cert: PeerCertificate - ): Error | undefined => { - return checkServerIdentity(sslTargetNameOverride, cert); - }; - connectionOptions.servername = sslTargetNameOverride; - } else { - if ('grpc.http_connect_target' in this.options) { - /* This is more or less how servername will be set in createSession - * if a connection is successfully established through the proxy. - * If the proxy is not used, these connectionOptions are discarded - * anyway */ - const targetPath = getDefaultAuthority( - parseUri(this.options['grpc.http_connect_target'] as string) ?? { - path: 'localhost', - } - ); - const hostPort = splitHostPort(targetPath); - connectionOptions.servername = hostPort?.host ?? targetPath; - } - } - } - - getProxiedConnection( - this.subchannelAddress, - this.options, - connectionOptions - ).then( - (result) => { - this.createSession(result); - }, - (reason) => { - this.transitionToState( - [ConnectivityState.CONNECTING], - ConnectivityState.TRANSIENT_FAILURE - ); - } - ); } /** @@ -531,31 +284,28 @@ export class Subchannel { */ private transitionToState( oldStates: ConnectivityState[], - newState: ConnectivityState + newState: ConnectivityState, + errorMessage?: string ): boolean { if (oldStates.indexOf(this.connectivityState) === -1) { return false; } - trace( - this.subchannelAddressString + - ' ' + - ConnectivityState[this.connectivityState] + + this.trace( + ConnectivityState[this.connectivityState] + ' -> ' + ConnectivityState[newState] ); + if (this.channelzEnabled) { + this.channelzTrace.addTrace( + 'CT_INFO', + 'Connectivity state change to ' + ConnectivityState[newState] + ); + } const previousState = this.connectivityState; this.connectivityState = newState; switch (newState) { case ConnectivityState.READY: this.stopBackoff(); - this.session!.socket.once('close', () => { - for (const listener of this.disconnectListeners) { - listener(); - } - }); - if (this.keepaliveWithoutCalls) { - this.startKeepalivePings(); - } break; case ConnectivityState.CONNECTING: this.startBackoff(); @@ -563,11 +313,11 @@ export class Subchannel { this.continueConnecting = false; break; case ConnectivityState.TRANSIENT_FAILURE: - if (this.session) { - this.session.close(); + if (this.channelzEnabled && this.transport) { + this.childrenTracker.unrefChild(this.transport.getChannelzRef()); } - this.session = null; - this.stopKeepalivePings(); + this.transport?.shutdown(); + this.transport = null; /* If the backoff timer has already ended by the time we get to the * TRANSIENT_FAILURE state, we want to immediately transition out of * TRANSIENT_FAILURE as though the backoff timer is ending right now */ @@ -578,103 +328,39 @@ export class Subchannel { } break; case ConnectivityState.IDLE: - if (this.session) { - this.session.close(); + if (this.channelzEnabled && this.transport) { + this.childrenTracker.unrefChild(this.transport.getChannelzRef()); } - this.session = null; - this.stopKeepalivePings(); + this.transport?.shutdown(); + this.transport = null; break; default: throw new Error(`Invalid state: unknown ConnectivityState ${newState}`); } - /* We use a shallow copy of the stateListeners array in case a listener - * is removed during this iteration */ - for (const listener of [...this.stateListeners]) { - listener(this, previousState, newState); + for (const 
listener of this.stateListeners) { + listener(this, previousState, newState, this.keepaliveTime, errorMessage); } return true; } - /** - * Check if the subchannel associated with zero calls and with zero channels. - * If so, shut it down. - */ - private checkBothRefcounts() { - /* If no calls, channels, or subchannel pools have any more references to - * this subchannel, we can be sure it will never be used again. */ - if (this.callRefcount === 0 && this.refcount === 0) { - this.transitionToState( - [ - ConnectivityState.CONNECTING, - ConnectivityState.READY, - ], - ConnectivityState.TRANSIENT_FAILURE - ); - } - } - - callRef() { - refTrace( - this.subchannelAddressString + - ' callRefcount ' + - this.callRefcount + - ' -> ' + - (this.callRefcount + 1) - ); - if (this.callRefcount === 0) { - if (this.session) { - this.session.ref(); - } - this.backoffTimeout.ref(); - if (!this.keepaliveWithoutCalls) { - this.startKeepalivePings(); - } - } - this.callRefcount += 1; - } - - callUnref() { - refTrace( - this.subchannelAddressString + - ' callRefcount ' + - this.callRefcount + - ' -> ' + - (this.callRefcount - 1) - ); - this.callRefcount -= 1; - if (this.callRefcount === 0) { - if (this.session) { - this.session.unref(); - } - this.backoffTimeout.unref(); - if (!this.keepaliveWithoutCalls) { - this.stopKeepalivePings(); - } - this.checkBothRefcounts(); - } - } - ref() { - refTrace( - this.subchannelAddressString + - ' refcount ' + - this.refcount + - ' -> ' + - (this.refcount + 1) - ); + this.refTrace('refcount ' + this.refcount + ' -> ' + (this.refcount + 1)); this.refcount += 1; } unref() { - refTrace( - this.subchannelAddressString + - ' refcount ' + - this.refcount + - ' -> ' + - (this.refcount - 1) - ); + this.refTrace('refcount ' + this.refcount + ' -> ' + (this.refcount - 1)); this.refcount -= 1; - this.checkBothRefcounts(); + if (this.refcount === 0) { + this.channelzTrace.addTrace('CT_INFO', 'Shutting down'); + unregisterChannelzRef(this.channelzRef); + process.nextTick(() => { + this.transitionToState( + [ConnectivityState.CONNECTING, ConnectivityState.READY], + ConnectivityState.IDLE + ); + }); + } } unrefIfOneRef(): boolean { @@ -685,49 +371,38 @@ export class Subchannel { return false; } - /** - * Start a stream on the current session with the given `metadata` as headers - * and then attach it to the `callStream`. Must only be called if the - * subchannel's current connectivity state is READY. - * @param metadata - * @param callStream - */ - startCallStream( + createCall( metadata: Metadata, - callStream: Http2CallStream, - extraFilterFactory?: FilterFactory - ) { - const headers = metadata.toHttp2Headers(); - headers[HTTP2_HEADER_AUTHORITY] = callStream.getHost(); - headers[HTTP2_HEADER_USER_AGENT] = this.userAgent; - headers[HTTP2_HEADER_CONTENT_TYPE] = 'application/grpc'; - headers[HTTP2_HEADER_METHOD] = 'POST'; - headers[HTTP2_HEADER_PATH] = callStream.getMethod(); - headers[HTTP2_HEADER_TE] = 'trailers'; - let http2Stream: http2.ClientHttp2Stream; - /* In theory, if an error is thrown by session.request because session has - * become unusable (e.g. because it has received a goaway), this subchannel - * should soon see the corresponding close or goaway event anyway and leave - * READY. But we have seen reports that this does not happen - * (https://github.com/googleapis/nodejs-firestore/issues/1023#issuecomment-653204096) - * so for defense in depth, we just discard the session when we see an - * error here. 
- */ - try { - http2Stream = this.session!.request(headers); - } catch (e) { - this.transitionToState( - [ConnectivityState.READY], - ConnectivityState.TRANSIENT_FAILURE - ); - throw e; + host: string, + method: string, + listener: SubchannelCallInterceptingListener + ): SubchannelCall { + if (!this.transport) { + throw new Error('Cannot create call, subchannel not READY'); } - let headersString = ''; - for (const header of Object.keys(headers)) { - headersString += '\t\t' + header + ': ' + headers[header] + '\n'; + let statsTracker: Partial; + if (this.channelzEnabled) { + this.callTracker.addCallStarted(); + this.streamTracker.addCallStarted(); + statsTracker = { + onCallEnd: status => { + if (status.code === Status.OK) { + this.callTracker.addCallSucceeded(); + } else { + this.callTracker.addCallFailed(); + } + }, + }; + } else { + statsTracker = {}; } - logging.trace(LogVerbosity.DEBUG, 'call_stream', 'Starting stream on subchannel ' + this.subchannelAddressString + ' with headers\n' + headersString); - callStream.attachHttp2Stream(http2Stream, this, extraFilterFactory); + return this.transport.createCall( + metadata, + host, + method, + listener, + statsTracker + ); } /** @@ -737,20 +412,22 @@ export class Subchannel { * Otherwise, do nothing. */ startConnecting() { - /* First, try to transition from IDLE to connecting. If that doesn't happen - * because the state is not currently IDLE, check if it is - * TRANSIENT_FAILURE, and if so indicate that it should go back to - * connecting after the backoff timer ends. Otherwise do nothing */ - if ( - !this.transitionToState( - [ConnectivityState.IDLE], - ConnectivityState.CONNECTING - ) - ) { - if (this.connectivityState === ConnectivityState.TRANSIENT_FAILURE) { - this.continueConnecting = true; + process.nextTick(() => { + /* First, try to transition from IDLE to connecting. If that doesn't happen + * because the state is not currently IDLE, check if it is + * TRANSIENT_FAILURE, and if so indicate that it should go back to + * connecting after the backoff timer ends. Otherwise do nothing */ + if ( + !this.transitionToState( + [ConnectivityState.IDLE], + ConnectivityState.CONNECTING + ) + ) { + if (this.connectivityState === ConnectivityState.TRANSIENT_FAILURE) { + this.continueConnecting = true; + } } - } + }); } /** @@ -766,7 +443,7 @@ export class Subchannel { * @param listener */ addConnectivityStateListener(listener: ConnectivityStateListener) { - this.stateListeners.push(listener); + this.stateListeners.add(listener); } /** @@ -775,35 +452,53 @@ export class Subchannel { * `addConnectivityStateListener` */ removeConnectivityStateListener(listener: ConnectivityStateListener) { - const listenerIndex = this.stateListeners.indexOf(listener); - if (listenerIndex > -1) { - this.stateListeners.splice(listenerIndex, 1); - } - } - - addDisconnectListener(listener: () => void) { - this.disconnectListeners.push(listener); - } - - removeDisconnectListener(listener: () => void) { - const listenerIndex = this.disconnectListeners.indexOf(listener); - if (listenerIndex > -1) { - this.disconnectListeners.splice(listenerIndex, 1); - } + this.stateListeners.delete(listener); } /** * Reset the backoff timeout, and immediately start connecting if in backoff. 
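// Illustrative sketch (not part of this change): attaching a connectivity
// state listener with the Set-based API above. The listener signature,
// including keepaliveTime and the optional errorMessage, is the
// ConnectivityStateListener type from subchannel-interface.ts;
// logConnectionFailures is a hypothetical helper.
import { ConnectivityState } from './connectivity-state';
import {
  ConnectivityStateListener,
  SubchannelInterface,
} from './subchannel-interface';

function logConnectionFailures(subchannel: SubchannelInterface): () => void {
  const listener: ConnectivityStateListener = (
    target,
    previousState,
    newState,
    keepaliveTime,
    errorMessage
  ) => {
    if (newState === ConnectivityState.TRANSIENT_FAILURE) {
      console.log(
        target.getAddress() + ' failed to connect: ' + (errorMessage ?? 'unknown error')
      );
    }
  };
  subchannel.addConnectivityStateListener(listener);
  // Listeners are stored in a Set, so removal only needs the same reference.
  return () => subchannel.removeConnectivityStateListener(listener);
}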
*/ resetBackoff() { - this.backoffTimeout.reset(); - this.transitionToState( - [ConnectivityState.TRANSIENT_FAILURE], - ConnectivityState.CONNECTING - ); + process.nextTick(() => { + this.backoffTimeout.reset(); + this.transitionToState( + [ConnectivityState.TRANSIENT_FAILURE], + ConnectivityState.CONNECTING + ); + }); } getAddress(): string { return this.subchannelAddressString; } + + getChannelzRef(): SubchannelRef { + return this.channelzRef; + } + + isHealthy(): boolean { + return true; + } + + addHealthStateWatcher(listener: (healthy: boolean) => void): void { + // Do nothing with the listener + } + + removeHealthStateWatcher(listener: (healthy: boolean) => void): void { + // Do nothing with the listener + } + + getRealSubchannel(): this { + return this; + } + + realSubchannelEquals(other: SubchannelInterface): boolean { + return other.getRealSubchannel() === this; + } + + throttleKeepalive(newKeepaliveTime: number) { + if (newKeepaliveTime > this.keepaliveTime) { + this.keepaliveTime = newKeepaliveTime; + } + } } diff --git a/packages/grpc-js/src/transport.ts b/packages/grpc-js/src/transport.ts new file mode 100644 index 000000000..934b62111 --- /dev/null +++ b/packages/grpc-js/src/transport.ts @@ -0,0 +1,853 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +import * as http2 from 'http2'; +import { + checkServerIdentity, + CipherNameAndProtocol, + ConnectionOptions, + PeerCertificate, + TLSSocket, +} from 'tls'; +import { PartialStatusObject } from './call-interface'; +import { ChannelCredentials } from './channel-credentials'; +import { ChannelOptions } from './channel-options'; +import { + ChannelzCallTracker, + ChannelzCallTrackerStub, + registerChannelzSocket, + SocketInfo, + SocketRef, + TlsInfo, + unregisterChannelzRef, +} from './channelz'; +import { LogVerbosity } from './constants'; +import { getProxiedConnection, ProxyConnectionResult } from './http_proxy'; +import * as logging from './logging'; +import { getDefaultAuthority } from './resolver'; +import { + stringToSubchannelAddress, + SubchannelAddress, + subchannelAddressToString, +} from './subchannel-address'; +import { GrpcUri, parseUri, splitHostPort, uriToString } from './uri-parser'; +import * as net from 'net'; +import { + Http2SubchannelCall, + SubchannelCall, + SubchannelCallInterceptingListener, +} from './subchannel-call'; +import { Metadata } from './metadata'; +import { getNextCallNumber } from './call-number'; + +const TRACER_NAME = 'transport'; +const FLOW_CONTROL_TRACER_NAME = 'transport_flowctrl'; + +const clientVersion = require('../../package.json').version; + +const { + HTTP2_HEADER_AUTHORITY, + HTTP2_HEADER_CONTENT_TYPE, + HTTP2_HEADER_METHOD, + HTTP2_HEADER_PATH, + HTTP2_HEADER_TE, + HTTP2_HEADER_USER_AGENT, +} = http2.constants; + +const KEEPALIVE_TIMEOUT_MS = 20000; + +export interface CallEventTracker { + addMessageSent(): void; + addMessageReceived(): void; + onCallEnd(status: PartialStatusObject): void; + onStreamEnd(success: boolean): void; +} + +export interface TransportDisconnectListener { + (tooManyPings: boolean): void; +} + +export interface Transport { + getChannelzRef(): SocketRef; + getPeerName(): string; + getOptions(): ChannelOptions; + createCall( + metadata: Metadata, + host: string, + method: string, + listener: SubchannelCallInterceptingListener, + subchannelCallStatsTracker: Partial + ): SubchannelCall; + addDisconnectListener(listener: TransportDisconnectListener): void; + shutdown(): void; +} + +const tooManyPingsData: Buffer = Buffer.from('too_many_pings', 'ascii'); + +class Http2Transport implements Transport { + /** + * The amount of time in between sending pings + */ + private keepaliveTimeMs = -1; + /** + * The amount of time to wait for an acknowledgement after sending a ping + */ + private keepaliveTimeoutMs: number = KEEPALIVE_TIMEOUT_MS; + /** + * Timer reference for timeout that indicates when to send the next ping + */ + private keepaliveTimerId: NodeJS.Timeout | null = null; + /** + * Indicates that the keepalive timer ran out while there were no active + * calls, and a ping should be sent the next time a call starts. 
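// Illustrative sketch (not part of this change): a partial CallEventTracker
// of the kind passed to Transport#createCall as subchannelCallStatsTracker.
// Only the hooks of interest need to be supplied; the log messages are
// placeholders.
import { CallEventTracker } from './transport';

const statsTracker: Partial<CallEventTracker> = {
  addMessageSent: () => console.log('request message written'),
  addMessageReceived: () => console.log('response message received'),
  onCallEnd: status => console.log('call ended with code ' + status.code),
  onStreamEnd: success => console.log('HTTP/2 stream ended, success=' + success),
};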
+ */ + private pendingSendKeepalivePing = false; + /** + * Timer reference tracking when the most recent ping will be considered lost + */ + private keepaliveTimeoutId: NodeJS.Timeout | null = null; + /** + * Indicates whether keepalive pings should be sent without any active calls + */ + private keepaliveWithoutCalls = false; + + private userAgent: string; + + private activeCalls: Set = new Set(); + + private subchannelAddressString: string; + + private disconnectListeners: TransportDisconnectListener[] = []; + + private disconnectHandled = false; + + // Channelz info + private channelzRef: SocketRef; + private readonly channelzEnabled: boolean = true; + private streamTracker: ChannelzCallTracker | ChannelzCallTrackerStub; + private keepalivesSent = 0; + private messagesSent = 0; + private messagesReceived = 0; + private lastMessageSentTimestamp: Date | null = null; + private lastMessageReceivedTimestamp: Date | null = null; + + constructor( + private session: http2.ClientHttp2Session, + subchannelAddress: SubchannelAddress, + private options: ChannelOptions, + /** + * Name of the remote server, if it is not the same as the subchannel + * address, i.e. if connecting through an HTTP CONNECT proxy. + */ + private remoteName: string | null + ) { + /* Populate subchannelAddressString and channelzRef before doing anything + * else, because they are used in the trace methods. */ + this.subchannelAddressString = subchannelAddressToString(subchannelAddress); + + if (options['grpc.enable_channelz'] === 0) { + this.channelzEnabled = false; + this.streamTracker = new ChannelzCallTrackerStub(); + } else { + this.streamTracker = new ChannelzCallTracker(); + } + + this.channelzRef = registerChannelzSocket( + this.subchannelAddressString, + () => this.getChannelzInfo(), + this.channelzEnabled + ); + + // Build user-agent string. + this.userAgent = [ + options['grpc.primary_user_agent'], + `grpc-node-js/${clientVersion}`, + options['grpc.secondary_user_agent'], + ] + .filter(e => e) + .join(' '); // remove falsey values first + + if ('grpc.keepalive_time_ms' in options) { + this.keepaliveTimeMs = options['grpc.keepalive_time_ms']!; + } + if ('grpc.keepalive_timeout_ms' in options) { + this.keepaliveTimeoutMs = options['grpc.keepalive_timeout_ms']!; + } + if ('grpc.keepalive_permit_without_calls' in options) { + this.keepaliveWithoutCalls = + options['grpc.keepalive_permit_without_calls'] === 1; + } else { + this.keepaliveWithoutCalls = false; + } + + session.once('close', () => { + this.trace('session closed'); + this.stopKeepalivePings(); + this.handleDisconnect(); + }); + + session.once( + 'goaway', + (errorCode: number, lastStreamID: number, opaqueData?: Buffer) => { + let tooManyPings = false; + /* See the last paragraph of + * https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md#basic-keepalive */ + if ( + errorCode === http2.constants.NGHTTP2_ENHANCE_YOUR_CALM && + opaqueData && + opaqueData.equals(tooManyPingsData) + ) { + tooManyPings = true; + } + this.trace( + 'connection closed by GOAWAY with code ' + + errorCode + + ' and data ' + + opaqueData?.toString() + ); + this.reportDisconnectToOwner(tooManyPings); + } + ); + + session.once('error', error => { + /* Do nothing here. Any error should also trigger a close event, which is + * where we want to handle that. 
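// Illustrative sketch (not part of this change): the options this constructor
// reads to build the user-agent string and to configure keepalive pings. The
// user agent values are hypothetical.
import { ChannelOptions } from './channel-options';

const transportOptions: ChannelOptions = {
  'grpc.primary_user_agent': 'my-app/1.0',        // prepended to grpc-node-js/<version>
  'grpc.secondary_user_agent': 'my-platform/2.0', // appended after the library version
  'grpc.keepalive_time_ms': 30000,                // interval between pings
  'grpc.keepalive_timeout_ms': 10000,             // how long to wait for a ping ack
  'grpc.keepalive_permit_without_calls': 1,       // allow pings with no active calls
};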
*/ + this.trace('connection closed with error ' + (error as Error).message); + }); + + if (logging.isTracerEnabled(TRACER_NAME)) { + session.on('remoteSettings', (settings: http2.Settings) => { + this.trace( + 'new settings received' + + (this.session !== session ? ' on the old connection' : '') + + ': ' + + JSON.stringify(settings) + ); + }); + session.on('localSettings', (settings: http2.Settings) => { + this.trace( + 'local settings acknowledged by remote' + + (this.session !== session ? ' on the old connection' : '') + + ': ' + + JSON.stringify(settings) + ); + }); + } + + /* Start the keepalive timer last, because this can trigger trace logs, + * which should only happen after everything else is set up. */ + if (this.keepaliveWithoutCalls) { + this.maybeStartKeepalivePingTimer(); + } + } + + private getChannelzInfo(): SocketInfo { + const sessionSocket = this.session.socket; + const remoteAddress = sessionSocket.remoteAddress + ? stringToSubchannelAddress( + sessionSocket.remoteAddress, + sessionSocket.remotePort + ) + : null; + const localAddress = sessionSocket.localAddress + ? stringToSubchannelAddress( + sessionSocket.localAddress, + sessionSocket.localPort + ) + : null; + let tlsInfo: TlsInfo | null; + if (this.session.encrypted) { + const tlsSocket: TLSSocket = sessionSocket as TLSSocket; + const cipherInfo: CipherNameAndProtocol & { standardName?: string } = + tlsSocket.getCipher(); + const certificate = tlsSocket.getCertificate(); + const peerCertificate = tlsSocket.getPeerCertificate(); + tlsInfo = { + cipherSuiteStandardName: cipherInfo.standardName ?? null, + cipherSuiteOtherName: cipherInfo.standardName ? null : cipherInfo.name, + localCertificate: + certificate && 'raw' in certificate ? certificate.raw : null, + remoteCertificate: + peerCertificate && 'raw' in peerCertificate + ? peerCertificate.raw + : null, + }; + } else { + tlsInfo = null; + } + const socketInfo: SocketInfo = { + remoteAddress: remoteAddress, + localAddress: localAddress, + security: tlsInfo, + remoteName: this.remoteName, + streamsStarted: this.streamTracker.callsStarted, + streamsSucceeded: this.streamTracker.callsSucceeded, + streamsFailed: this.streamTracker.callsFailed, + messagesSent: this.messagesSent, + messagesReceived: this.messagesReceived, + keepAlivesSent: this.keepalivesSent, + lastLocalStreamCreatedTimestamp: + this.streamTracker.lastCallStartedTimestamp, + lastRemoteStreamCreatedTimestamp: null, + lastMessageSentTimestamp: this.lastMessageSentTimestamp, + lastMessageReceivedTimestamp: this.lastMessageReceivedTimestamp, + localFlowControlWindow: this.session.state.localWindowSize ?? null, + remoteFlowControlWindow: this.session.state.remoteWindowSize ?? 
null, + }; + return socketInfo; + } + + private trace(text: string): void { + logging.trace( + LogVerbosity.DEBUG, + TRACER_NAME, + '(' + + this.channelzRef.id + + ') ' + + this.subchannelAddressString + + ' ' + + text + ); + } + + private keepaliveTrace(text: string): void { + logging.trace( + LogVerbosity.DEBUG, + 'keepalive', + '(' + + this.channelzRef.id + + ') ' + + this.subchannelAddressString + + ' ' + + text + ); + } + + private flowControlTrace(text: string): void { + logging.trace( + LogVerbosity.DEBUG, + FLOW_CONTROL_TRACER_NAME, + '(' + + this.channelzRef.id + + ') ' + + this.subchannelAddressString + + ' ' + + text + ); + } + + private internalsTrace(text: string): void { + logging.trace( + LogVerbosity.DEBUG, + 'transport_internals', + '(' + + this.channelzRef.id + + ') ' + + this.subchannelAddressString + + ' ' + + text + ); + } + + /** + * Indicate to the owner of this object that this transport should no longer + * be used. That happens if the connection drops, or if the server sends a + * GOAWAY. + * @param tooManyPings If true, this was triggered by a GOAWAY with data + * indicating that the session was closed becaues the client sent too many + * pings. + * @returns + */ + private reportDisconnectToOwner(tooManyPings: boolean) { + if (this.disconnectHandled) { + return; + } + this.disconnectHandled = true; + this.disconnectListeners.forEach(listener => listener(tooManyPings)); + } + + /** + * Handle connection drops, but not GOAWAYs. + */ + private handleDisconnect() { + this.reportDisconnectToOwner(false); + /* Give calls an event loop cycle to finish naturally before reporting the + * disconnnection to them. */ + setImmediate(() => { + for (const call of this.activeCalls) { + call.onDisconnect(); + } + }); + } + + addDisconnectListener(listener: TransportDisconnectListener): void { + this.disconnectListeners.push(listener); + } + + private clearKeepaliveTimer() { + if (!this.keepaliveTimerId) { + return; + } + clearTimeout(this.keepaliveTimerId); + this.keepaliveTimerId = null; + } + + private clearKeepaliveTimeout() { + if (!this.keepaliveTimeoutId) { + return; + } + clearTimeout(this.keepaliveTimeoutId); + this.keepaliveTimeoutId = null; + } + + private canSendPing() { + return ( + this.keepaliveTimeMs > 0 && + (this.keepaliveWithoutCalls || this.activeCalls.size > 0) + ); + } + + private maybeSendPing() { + this.clearKeepaliveTimer(); + if (!this.canSendPing()) { + this.pendingSendKeepalivePing = true; + return; + } + if (this.channelzEnabled) { + this.keepalivesSent += 1; + } + this.keepaliveTrace( + 'Sending ping with timeout ' + this.keepaliveTimeoutMs + 'ms' + ); + if (!this.keepaliveTimeoutId) { + this.keepaliveTimeoutId = setTimeout(() => { + this.keepaliveTrace('Ping timeout passed without response'); + this.handleDisconnect(); + }, this.keepaliveTimeoutMs); + this.keepaliveTimeoutId.unref?.(); + } + try { + this.session!.ping( + (err: Error | null, duration: number, payload: Buffer) => { + if (err) { + this.keepaliveTrace('Ping failed with error ' + err.message); + this.handleDisconnect(); + } + this.keepaliveTrace('Received ping response'); + this.clearKeepaliveTimeout(); + this.maybeStartKeepalivePingTimer(); + } + ); + } catch (e) { + /* If we fail to send a ping, the connection is no longer functional, so + * we should discard it. */ + this.handleDisconnect(); + } + } + + /** + * Starts the keepalive ping timer if appropriate. If the timer already ran + * out while there were no active requests, instead send a ping immediately. 
+ * If the ping timer is already running or a ping is currently in flight, + * instead do nothing and wait for them to resolve. + */ + private maybeStartKeepalivePingTimer() { + if (!this.canSendPing()) { + return; + } + if (this.pendingSendKeepalivePing) { + this.pendingSendKeepalivePing = false; + this.maybeSendPing(); + } else if (!this.keepaliveTimerId && !this.keepaliveTimeoutId) { + this.keepaliveTrace( + 'Starting keepalive timer for ' + this.keepaliveTimeMs + 'ms' + ); + this.keepaliveTimerId = setTimeout(() => { + this.maybeSendPing(); + }, this.keepaliveTimeMs); + this.keepaliveTimerId.unref?.(); + } + /* Otherwise, there is already either a keepalive timer or a ping pending, + * wait for those to resolve. */ + } + + private stopKeepalivePings() { + if (this.keepaliveTimerId) { + clearTimeout(this.keepaliveTimerId); + this.keepaliveTimerId = null; + } + this.clearKeepaliveTimeout(); + } + + private removeActiveCall(call: Http2SubchannelCall) { + this.activeCalls.delete(call); + if (this.activeCalls.size === 0) { + this.session.unref(); + } + } + + private addActiveCall(call: Http2SubchannelCall) { + this.activeCalls.add(call); + if (this.activeCalls.size === 1) { + this.session.ref(); + if (!this.keepaliveWithoutCalls) { + this.maybeStartKeepalivePingTimer(); + } + } + } + + createCall( + metadata: Metadata, + host: string, + method: string, + listener: SubchannelCallInterceptingListener, + subchannelCallStatsTracker: Partial + ): Http2SubchannelCall { + const headers = metadata.toHttp2Headers(); + headers[HTTP2_HEADER_AUTHORITY] = host; + headers[HTTP2_HEADER_USER_AGENT] = this.userAgent; + headers[HTTP2_HEADER_CONTENT_TYPE] = 'application/grpc'; + headers[HTTP2_HEADER_METHOD] = 'POST'; + headers[HTTP2_HEADER_PATH] = method; + headers[HTTP2_HEADER_TE] = 'trailers'; + let http2Stream: http2.ClientHttp2Stream; + /* In theory, if an error is thrown by session.request because session has + * become unusable (e.g. because it has received a goaway), this subchannel + * should soon see the corresponding close or goaway event anyway and leave + * READY. But we have seen reports that this does not happen + * (https://github.com/googleapis/nodejs-firestore/issues/1023#issuecomment-653204096) + * so for defense in depth, we just discard the session when we see an + * error here. 
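// Illustrative sketch (not part of this change): the header block createCall
// assembles for an outgoing gRPC request, using the http2.constants header
// names imported at the top of this file. The authority, user agent, and path
// values are hypothetical.
import * as http2 from 'http2';

const exampleHeaders: http2.OutgoingHttpHeaders = {
  [http2.constants.HTTP2_HEADER_AUTHORITY]: 'example.googleapis.com:443',
  [http2.constants.HTTP2_HEADER_USER_AGENT]: 'my-app/1.0 grpc-node-js/1.8.0',
  [http2.constants.HTTP2_HEADER_CONTENT_TYPE]: 'application/grpc',
  [http2.constants.HTTP2_HEADER_METHOD]: 'POST',
  [http2.constants.HTTP2_HEADER_PATH]: '/helloworld.Greeter/SayHello',
  [http2.constants.HTTP2_HEADER_TE]: 'trailers',
};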
+ */ + try { + http2Stream = this.session!.request(headers); + } catch (e) { + this.handleDisconnect(); + throw e; + } + this.flowControlTrace( + 'local window size: ' + + this.session.state.localWindowSize + + ' remote window size: ' + + this.session.state.remoteWindowSize + ); + this.internalsTrace( + 'session.closed=' + + this.session.closed + + ' session.destroyed=' + + this.session.destroyed + + ' session.socket.destroyed=' + + this.session.socket.destroyed + ); + let eventTracker: CallEventTracker; + // eslint-disable-next-line prefer-const + let call: Http2SubchannelCall; + if (this.channelzEnabled) { + this.streamTracker.addCallStarted(); + eventTracker = { + addMessageSent: () => { + this.messagesSent += 1; + this.lastMessageSentTimestamp = new Date(); + subchannelCallStatsTracker.addMessageSent?.(); + }, + addMessageReceived: () => { + this.messagesReceived += 1; + this.lastMessageReceivedTimestamp = new Date(); + subchannelCallStatsTracker.addMessageReceived?.(); + }, + onCallEnd: status => { + subchannelCallStatsTracker.onCallEnd?.(status); + this.removeActiveCall(call); + }, + onStreamEnd: success => { + if (success) { + this.streamTracker.addCallSucceeded(); + } else { + this.streamTracker.addCallFailed(); + } + subchannelCallStatsTracker.onStreamEnd?.(success); + }, + }; + } else { + eventTracker = { + addMessageSent: () => { + subchannelCallStatsTracker.addMessageSent?.(); + }, + addMessageReceived: () => { + subchannelCallStatsTracker.addMessageReceived?.(); + }, + onCallEnd: status => { + subchannelCallStatsTracker.onCallEnd?.(status); + this.removeActiveCall(call); + }, + onStreamEnd: success => { + subchannelCallStatsTracker.onStreamEnd?.(success); + }, + }; + } + call = new Http2SubchannelCall( + http2Stream, + eventTracker, + listener, + this, + getNextCallNumber() + ); + this.addActiveCall(call); + return call; + } + + getChannelzRef(): SocketRef { + return this.channelzRef; + } + + getPeerName() { + return this.subchannelAddressString; + } + + getOptions() { + return this.options; + } + + shutdown() { + this.session.close(); + unregisterChannelzRef(this.channelzRef); + } +} + +export interface SubchannelConnector { + connect( + address: SubchannelAddress, + credentials: ChannelCredentials, + options: ChannelOptions + ): Promise; + shutdown(): void; +} + +export class Http2SubchannelConnector implements SubchannelConnector { + private session: http2.ClientHttp2Session | null = null; + private isShutdown = false; + constructor(private channelTarget: GrpcUri) {} + + private trace(text: string) { + logging.trace( + LogVerbosity.DEBUG, + TRACER_NAME, + uriToString(this.channelTarget) + ' ' + text + ); + } + + private createSession( + address: SubchannelAddress, + credentials: ChannelCredentials, + options: ChannelOptions, + proxyConnectionResult: ProxyConnectionResult + ): Promise { + if (this.isShutdown) { + return Promise.reject(); + } + + return new Promise((resolve, reject) => { + let remoteName: string | null; + if (proxyConnectionResult.realTarget) { + remoteName = uriToString(proxyConnectionResult.realTarget); + this.trace( + 'creating HTTP/2 session through proxy to ' + + uriToString(proxyConnectionResult.realTarget) + ); + } else { + remoteName = null; + this.trace( + 'creating HTTP/2 session to ' + subchannelAddressToString(address) + ); + } + const targetAuthority = getDefaultAuthority( + proxyConnectionResult.realTarget ?? 
this.channelTarget + ); + let connectionOptions: http2.SecureClientSessionOptions = + credentials._getConnectionOptions() || {}; + connectionOptions.maxSendHeaderBlockLength = Number.MAX_SAFE_INTEGER; + if ('grpc-node.max_session_memory' in options) { + connectionOptions.maxSessionMemory = + options['grpc-node.max_session_memory']; + } else { + /* By default, set a very large max session memory limit, to effectively + * disable enforcement of the limit. Some testing indicates that Node's + * behavior degrades badly when this limit is reached, so we solve that + * by disabling the check entirely. */ + connectionOptions.maxSessionMemory = Number.MAX_SAFE_INTEGER; + } + let addressScheme = 'http://'; + if ('secureContext' in connectionOptions) { + addressScheme = 'https://'; + // If provided, the value of grpc.ssl_target_name_override should be used + // to override the target hostname when checking server identity. + // This option is used for testing only. + if (options['grpc.ssl_target_name_override']) { + const sslTargetNameOverride = + options['grpc.ssl_target_name_override']!; + const originalCheckServerIdentity = + connectionOptions.checkServerIdentity ?? checkServerIdentity; + connectionOptions.checkServerIdentity = ( + host: string, + cert: PeerCertificate + ): Error | undefined => { + return originalCheckServerIdentity(sslTargetNameOverride, cert); + }; + connectionOptions.servername = sslTargetNameOverride; + } else { + const authorityHostname = + splitHostPort(targetAuthority)?.host ?? 'localhost'; + // We want to always set servername to support SNI + connectionOptions.servername = authorityHostname; + } + if (proxyConnectionResult.socket) { + /* This is part of the workaround for + * https://github.com/nodejs/node/issues/32922. Without that bug, + * proxyConnectionResult.socket would always be a plaintext socket and + * this would say + * connectionOptions.socket = proxyConnectionResult.socket; */ + connectionOptions.createConnection = (authority, option) => { + return proxyConnectionResult.socket!; + }; + } + } else { + /* In all but the most recent versions of Node, http2.connect does not use + * the options when establishing plaintext connections, so we need to + * establish that connection explicitly. */ + connectionOptions.createConnection = (authority, option) => { + if (proxyConnectionResult.socket) { + return proxyConnectionResult.socket; + } else { + /* net.NetConnectOpts is declared in a way that is more restrictive + * than what net.connect will actually accept, so we use the type + * assertion to work around that. */ + return net.connect(address); + } + }; + } + + connectionOptions = { + ...connectionOptions, + ...address, + enableTrace: options['grpc-node.tls_enable_trace'] === 1, + }; + + /* http2.connect uses the options here: + * https://github.com/nodejs/node/blob/70c32a6d190e2b5d7b9ff9d5b6a459d14e8b7d59/lib/internal/http2/core.js#L3028-L3036 + * The spread operator overides earlier values with later ones, so any port + * or host values in the options will be used rather than any values extracted + * from the first argument. In addition, the path overrides the host and port, + * as documented for plaintext connections here: + * https://nodejs.org/api/net.html#net_socket_connect_options_connectlistener + * and for TLS connections here: + * https://nodejs.org/api/tls.html#tls_tls_connect_options_callback. 
In + * earlier versions of Node, http2.connect passes these options to + * tls.connect but not net.connect, so in the insecure case we still need + * to set the createConnection option above to create the connection + * explicitly. We cannot do that in the TLS case because http2.connect + * passes necessary additional options to tls.connect. + * The first argument just needs to be parseable as a URL and the scheme + * determines whether the connection will be established over TLS or not. + */ + const session = http2.connect( + addressScheme + targetAuthority, + connectionOptions + ); + this.session = session; + let errorMessage = 'Failed to connect'; + session.unref(); + session.once('connect', () => { + session.removeAllListeners(); + resolve(new Http2Transport(session, address, options, remoteName)); + this.session = null; + }); + session.once('close', () => { + this.session = null; + // Leave time for error event to happen before rejecting + setImmediate(() => { + reject(`${errorMessage} (${new Date().toISOString()})`); + }); + }); + session.once('error', error => { + errorMessage = (error as Error).message; + this.trace('connection failed with error ' + errorMessage); + }); + }); + } + + connect( + address: SubchannelAddress, + credentials: ChannelCredentials, + options: ChannelOptions + ): Promise { + if (this.isShutdown) { + return Promise.reject(); + } + /* Pass connection options through to the proxy so that it's able to + * upgrade it's connection to support tls if needed. + * This is a workaround for https://github.com/nodejs/node/issues/32922 + * See https://github.com/grpc/grpc-node/pull/1369 for more info. */ + const connectionOptions: ConnectionOptions = + credentials._getConnectionOptions() || {}; + + if ('secureContext' in connectionOptions) { + connectionOptions.ALPNProtocols = ['h2']; + // If provided, the value of grpc.ssl_target_name_override should be used + // to override the target hostname when checking server identity. + // This option is used for testing only. + if (options['grpc.ssl_target_name_override']) { + const sslTargetNameOverride = options['grpc.ssl_target_name_override']!; + const originalCheckServerIdentity = + connectionOptions.checkServerIdentity ?? checkServerIdentity; + connectionOptions.checkServerIdentity = ( + host: string, + cert: PeerCertificate + ): Error | undefined => { + return originalCheckServerIdentity(sslTargetNameOverride, cert); + }; + connectionOptions.servername = sslTargetNameOverride; + } else { + if ('grpc.http_connect_target' in options) { + /* This is more or less how servername will be set in createSession + * if a connection is successfully established through the proxy. + * If the proxy is not used, these connectionOptions are discarded + * anyway */ + const targetPath = getDefaultAuthority( + parseUri(options['grpc.http_connect_target'] as string) ?? { + path: 'localhost', + } + ); + const hostPort = splitHostPort(targetPath); + connectionOptions.servername = hostPort?.host ?? 
targetPath; + } + } + if (options['grpc-node.tls_enable_trace']) { + connectionOptions.enableTrace = true; + } + } + + return getProxiedConnection(address, options, connectionOptions).then( + result => this.createSession(address, credentials, options, result) + ); + } + + shutdown(): void { + this.isShutdown = true; + this.session?.close(); + this.session = null; + } +} diff --git a/packages/grpc-js/src/uri-parser.ts b/packages/grpc-js/src/uri-parser.ts index 20c3d53b3..2b2efeca0 100644 --- a/packages/grpc-js/src/uri-parser.ts +++ b/packages/grpc-js/src/uri-parser.ts @@ -101,6 +101,19 @@ export function splitHostPort(path: string): HostPort | null { } } +export function combineHostPort(hostPort: HostPort): string { + if (hostPort.port === undefined) { + return hostPort.host; + } else { + // Only an IPv6 host should include a colon + if (hostPort.host.includes(':')) { + return `[${hostPort.host}]:${hostPort.port}`; + } else { + return `${hostPort.host}:${hostPort.port}`; + } + } +} + export function uriToString(uri: GrpcUri): string { let result = ''; if (uri.scheme !== undefined) { diff --git a/packages/grpc-js/test/assert2.ts b/packages/grpc-js/test/assert2.ts new file mode 100644 index 000000000..d3912a928 --- /dev/null +++ b/packages/grpc-js/test/assert2.ts @@ -0,0 +1,93 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import * as assert from 'assert'; + +const toCall = new Map<() => void, number>(); +const afterCallsQueue: Array<() => void> = []; + +/** + * Assert that the given function doesn't throw an error, and then return + * its value. + * @param fn The function to evaluate. + */ +export function noThrowAndReturn(fn: () => T): T { + try { + return fn(); + } catch (e) { + assert.throws(() => { + throw e; + }); + throw e; // for type safety only + } +} + +/** + * Helper function that returns true when every function wrapped with + * mustCall has been called. + */ +function mustCallsSatisfied(): boolean { + let result = true; + toCall.forEach(value => { + result = result && value === 0; + }); + return result; +} + +export function clearMustCalls(): void { + afterCallsQueue.length = 0; +} + +/** + * Wraps a function to keep track of whether it was called or not. + * @param fn The function to wrap. + */ +// tslint:disable:no-any +export function mustCall(fn: (...args: any[]) => T): (...args: any[]) => T { + const existingValue = toCall.get(fn); + if (existingValue !== undefined) { + toCall.set(fn, existingValue + 1); + } else { + toCall.set(fn, 1); + } + return (...args: any[]) => { + const result = fn(...args); + const existingValue = toCall.get(fn); + if (existingValue !== undefined) { + toCall.set(fn, existingValue - 1); + } + if (mustCallsSatisfied()) { + afterCallsQueue.forEach(fn => fn()); + afterCallsQueue.length = 0; + } + return result; + }; +} + +/** + * Calls the given function when every function that was wrapped with + * mustCall has been called. 
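+ *
+ * Illustrative usage (an assumption, not part of this change): wrap each
+ * callback that must fire with mustCall, then finish the test from here, e.g.
+ *   call.on('status', mustCall(status => assert.strictEqual(status.code, 0)));
+ *   afterMustCallsSatisfied(done);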
+ * @param fn The function to call once all mustCall-wrapped functions have + * been called. + */ +export function afterMustCallsSatisfied(fn: () => void): void { + if (!mustCallsSatisfied()) { + afterCallsQueue.push(fn); + } else { + fn(); + } +} diff --git a/packages/grpc-js/test/common.ts b/packages/grpc-js/test/common.ts index 24cb71650..5efbf9808 100644 --- a/packages/grpc-js/test/common.ts +++ b/packages/grpc-js/test/common.ts @@ -16,9 +16,26 @@ */ import * as loader from '@grpc/proto-loader'; -import * as assert from 'assert'; +import * as assert2 from './assert2'; +import * as path from 'path'; +import * as grpc from '../src'; +import * as fsPromises from 'fs/promises'; +import * as os from 'os'; -import { GrpcObject, loadPackageDefinition } from '../src/make-client'; +import { + GrpcObject, + ServiceClientConstructor, + ServiceClient, + loadPackageDefinition, +} from '../src/make-client'; +import { readFileSync } from 'fs'; +import { + HealthListener, + SubchannelInterface, +} from '../src/subchannel-interface'; +import { EntityTypes, SubchannelRef } from '../src/channelz'; +import { Subchannel } from '../src/subchannel'; +import { ConnectivityState } from '../src/connectivity-state'; const protoLoaderOptions = { keepCase: true, @@ -32,86 +49,215 @@ export function mockFunction(): never { throw new Error('Not implemented'); } -export namespace assert2 { - const toCall = new Map<() => void, number>(); - const afterCallsQueue: Array<() => void> = []; - - /** - * Assert that the given function doesn't throw an error, and then return - * its value. - * @param fn The function to evaluate. - */ - export function noThrowAndReturn(fn: () => T): T { - try { - return fn(); - } catch (e) { - assert.throws(() => { - throw e; - }); - throw e; // for type safety only +export function loadProtoFile(file: string): GrpcObject { + const packageDefinition = loader.loadSync(file, protoLoaderOptions); + return loadPackageDefinition(packageDefinition); +} + +const protoFile = path.join(__dirname, 'fixtures', 'echo_service.proto'); +const echoService = loadProtoFile(protoFile) + .EchoService as ServiceClientConstructor; + +const ca = readFileSync(path.join(__dirname, 'fixtures', 'ca.pem')); +const key = readFileSync(path.join(__dirname, 'fixtures', 'server1.key')); +const cert = readFileSync(path.join(__dirname, 'fixtures', 'server1.pem')); + +const serviceImpl = { + echo: ( + call: grpc.ServerUnaryCall, + callback: grpc.sendUnaryData + ) => { + callback(null, call.request); + }, +}; + +export class TestServer { + private server: grpc.Server; + private target: string | null = null; + constructor(public useTls: boolean, options?: grpc.ServerOptions) { + this.server = new grpc.Server(options); + this.server.addService(echoService.service, serviceImpl); + } + + private getCredentials(): grpc.ServerCredentials { + if (this.useTls) { + return grpc.ServerCredentials.createSsl(null, [ + { private_key: key, cert_chain: cert }, + ]); + } else { + return grpc.ServerCredentials.createInsecure(); } } - /** - * Helper function that returns true when every function wrapped with - * mustCall has been called. 
- */ - function mustCallsSatisfied(): boolean { - let result = true; - toCall.forEach(value => { - result = result && value === 0; + start(): Promise { + return new Promise((resolve, reject) => { + this.server.bindAsync('localhost:0', this.getCredentials(), (error, port) => { + if (error) { + reject(error); + return; + } + this.target = `localhost:${port}`; + resolve(); + }); }); - return result; } - export function clearMustCalls(): void { - afterCallsQueue.length = 0; + startUds(): Promise { + return fsPromises.mkdtemp(path.join(os.tmpdir(), 'uds')).then(dir => { + return new Promise((resolve, reject) => { + const target = `unix://${dir}/socket`; + this.server.bindAsync(target, this.getCredentials(), (error, port) => { + if (error) { + reject(error); + return; + } + this.target = target; + resolve(); + }); + }); + }); } - /** - * Wraps a function to keep track of whether it was called or not. - * @param fn The function to wrap. - */ - // tslint:disable:no-any - export function mustCall( - fn: (...args: any[]) => T - ): (...args: any[]) => T { - const existingValue = toCall.get(fn); - if (existingValue !== undefined) { - toCall.set(fn, existingValue + 1); - } else { - toCall.set(fn, 1); + shutdown() { + this.server.forceShutdown(); + } + + getTarget() { + if (this.target === null) { + throw new Error('Server not yet started'); } - return (...args: any[]) => { - const result = fn(...args); - const existingValue = toCall.get(fn); - if (existingValue !== undefined) { - toCall.set(fn, existingValue - 1); - } - if (mustCallsSatisfied()) { - afterCallsQueue.forEach(fn => fn()); - afterCallsQueue.length = 0; - } - return result; - }; + return this.target; } +} - /** - * Calls the given function when every function that was wrapped with - * mustCall has been called. - * @param fn The function to call once all mustCall-wrapped functions have - * been called. 
- */ - export function afterMustCallsSatisfied(fn: () => void): void { - if (!mustCallsSatisfied()) { - afterCallsQueue.push(fn); +export class TestClient { + private client: ServiceClient; + constructor(target: string, useTls: boolean, options?: grpc.ChannelOptions) { + let credentials: grpc.ChannelCredentials; + if (useTls) { + credentials = grpc.credentials.createSsl(ca); } else { - fn(); + credentials = grpc.credentials.createInsecure(); } + this.client = new echoService(target, credentials, options); + } + + static createFromServer(server: TestServer, options?: grpc.ChannelOptions) { + return new TestClient(server.getTarget(), server.useTls, options); + } + + waitForReady(deadline: grpc.Deadline, callback: (error?: Error) => void) { + this.client.waitForReady(deadline, callback); + } + + sendRequest(callback: (error?: grpc.ServiceError) => void) { + this.client.echo({}, callback); + } + + sendRequestWithMetadata( + metadata: grpc.Metadata, + callback: (error?: grpc.ServiceError) => void + ) { + this.client.echo({}, metadata, callback); + } + + getChannelState() { + return this.client.getChannel().getConnectivityState(false); + } + + waitForClientState( + deadline: grpc.Deadline, + state: ConnectivityState, + callback: (error?: Error) => void + ) { + this.client + .getChannel() + .watchConnectivityState(this.getChannelState(), deadline, err => { + if (err) { + return callback(err); + } + + const currentState = this.getChannelState(); + if (currentState === state) { + callback(); + } else { + return this.waitForClientState(deadline, currentState, callback); + } + }); + } + + close() { + this.client.close(); } } -export function loadProtoFile(file: string): GrpcObject { - const packageDefinition = loader.loadSync(file, protoLoaderOptions); - return loadPackageDefinition(packageDefinition); +/** + * A mock subchannel that transitions between states on command, to test LB + * policy behavior + */ +export class MockSubchannel implements SubchannelInterface { + private state: grpc.connectivityState; + private listeners: Set = + new Set(); + constructor( + private readonly address: string, + initialState: grpc.connectivityState = grpc.connectivityState.IDLE + ) { + this.state = initialState; + } + getConnectivityState(): grpc.connectivityState { + return this.state; + } + addConnectivityStateListener( + listener: grpc.experimental.ConnectivityStateListener + ): void { + this.listeners.add(listener); + } + removeConnectivityStateListener( + listener: grpc.experimental.ConnectivityStateListener + ): void { + this.listeners.delete(listener); + } + transitionToState(nextState: grpc.connectivityState) { + grpc.experimental.trace( + grpc.logVerbosity.DEBUG, + 'subchannel', + this.address + + ' ' + + ConnectivityState[this.state] + + ' -> ' + + ConnectivityState[nextState] + ); + for (const listener of this.listeners) { + listener(this, this.state, nextState, 0); + } + this.state = nextState; + } + startConnecting(): void {} + getAddress(): string { + return this.address; + } + throttleKeepalive(newKeepaliveTime: number): void {} + ref(): void {} + unref(): void {} + getChannelzRef(): SubchannelRef { + return { + kind: EntityTypes.subchannel, + id: -1, + name: this.address, + }; + } + getRealSubchannel(): Subchannel { + throw new Error('Method not implemented.'); + } + realSubchannelEquals(other: grpc.experimental.SubchannelInterface): boolean { + return this === other; + } + isHealthy(): boolean { + return true; + } + addHealthStateWatcher(listener: HealthListener): void {} + 
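+ // Health state is not simulated by this mock: isHealthy() above always
+ // returns true, and the health watcher methods are deliberate no-ops.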
removeHealthStateWatcher(listener: HealthListener): void {} } + +export { assert2 }; diff --git a/packages/grpc-js/test/fixtures/test_service.proto b/packages/grpc-js/test/fixtures/test_service.proto index f99393d14..2a7a303f3 100644 --- a/packages/grpc-js/test/fixtures/test_service.proto +++ b/packages/grpc-js/test/fixtures/test_service.proto @@ -21,10 +21,12 @@ message Request { bool error = 1; string message = 2; int32 errorAfter = 3; + int32 responseLength = 4; } message Response { int32 count = 1; + string message = 2; } service TestService { diff --git a/packages/grpc-js/test/generated/Request.ts b/packages/grpc-js/test/generated/Request.ts new file mode 100644 index 000000000..d64ebb6ea --- /dev/null +++ b/packages/grpc-js/test/generated/Request.ts @@ -0,0 +1,14 @@ +// Original file: test/fixtures/test_service.proto + + +export interface Request { + 'error'?: (boolean); + 'message'?: (string); + 'errorAfter'?: (number); +} + +export interface Request__Output { + 'error': (boolean); + 'message': (string); + 'errorAfter': (number); +} diff --git a/packages/grpc-js/test/generated/Response.ts b/packages/grpc-js/test/generated/Response.ts new file mode 100644 index 000000000..465ab7203 --- /dev/null +++ b/packages/grpc-js/test/generated/Response.ts @@ -0,0 +1,12 @@ +// Original file: test/fixtures/test_service.proto + + +export interface Response { + 'count'?: (number); + 'message'?: (string); +} + +export interface Response__Output { + 'count': (number); + 'message': (string); +} diff --git a/packages/grpc-js/test/generated/TestService.ts b/packages/grpc-js/test/generated/TestService.ts new file mode 100644 index 000000000..e477c99b5 --- /dev/null +++ b/packages/grpc-js/test/generated/TestService.ts @@ -0,0 +1,55 @@ +// Original file: test/fixtures/test_service.proto + +import type * as grpc from './../../src/index' +import type { MethodDefinition } from '@grpc/proto-loader' +import type { Request as _Request, Request__Output as _Request__Output } from './Request'; +import type { Response as _Response, Response__Output as _Response__Output } from './Response'; + +export interface TestServiceClient extends grpc.Client { + BidiStream(metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientDuplexStream<_Request, _Response__Output>; + BidiStream(options?: grpc.CallOptions): grpc.ClientDuplexStream<_Request, _Response__Output>; + bidiStream(metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientDuplexStream<_Request, _Response__Output>; + bidiStream(options?: grpc.CallOptions): grpc.ClientDuplexStream<_Request, _Response__Output>; + + ClientStream(metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_Response__Output>): grpc.ClientWritableStream<_Request>; + ClientStream(metadata: grpc.Metadata, callback: grpc.requestCallback<_Response__Output>): grpc.ClientWritableStream<_Request>; + ClientStream(options: grpc.CallOptions, callback: grpc.requestCallback<_Response__Output>): grpc.ClientWritableStream<_Request>; + ClientStream(callback: grpc.requestCallback<_Response__Output>): grpc.ClientWritableStream<_Request>; + clientStream(metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_Response__Output>): grpc.ClientWritableStream<_Request>; + clientStream(metadata: grpc.Metadata, callback: grpc.requestCallback<_Response__Output>): grpc.ClientWritableStream<_Request>; + clientStream(options: grpc.CallOptions, callback: grpc.requestCallback<_Response__Output>): grpc.ClientWritableStream<_Request>; + 
clientStream(callback: grpc.requestCallback<_Response__Output>): grpc.ClientWritableStream<_Request>; + + ServerStream(argument: _Request, metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientReadableStream<_Response__Output>; + ServerStream(argument: _Request, options?: grpc.CallOptions): grpc.ClientReadableStream<_Response__Output>; + serverStream(argument: _Request, metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientReadableStream<_Response__Output>; + serverStream(argument: _Request, options?: grpc.CallOptions): grpc.ClientReadableStream<_Response__Output>; + + Unary(argument: _Request, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_Response__Output>): grpc.ClientUnaryCall; + Unary(argument: _Request, metadata: grpc.Metadata, callback: grpc.requestCallback<_Response__Output>): grpc.ClientUnaryCall; + Unary(argument: _Request, options: grpc.CallOptions, callback: grpc.requestCallback<_Response__Output>): grpc.ClientUnaryCall; + Unary(argument: _Request, callback: grpc.requestCallback<_Response__Output>): grpc.ClientUnaryCall; + unary(argument: _Request, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<_Response__Output>): grpc.ClientUnaryCall; + unary(argument: _Request, metadata: grpc.Metadata, callback: grpc.requestCallback<_Response__Output>): grpc.ClientUnaryCall; + unary(argument: _Request, options: grpc.CallOptions, callback: grpc.requestCallback<_Response__Output>): grpc.ClientUnaryCall; + unary(argument: _Request, callback: grpc.requestCallback<_Response__Output>): grpc.ClientUnaryCall; + +} + +export interface TestServiceHandlers extends grpc.UntypedServiceImplementation { + BidiStream: grpc.handleBidiStreamingCall<_Request__Output, _Response>; + + ClientStream: grpc.handleClientStreamingCall<_Request__Output, _Response>; + + ServerStream: grpc.handleServerStreamingCall<_Request__Output, _Response>; + + Unary: grpc.handleUnaryCall<_Request__Output, _Response>; + +} + +export interface TestServiceDefinition extends grpc.ServiceDefinition { + BidiStream: MethodDefinition<_Request, _Response, _Request__Output, _Response__Output> + ClientStream: MethodDefinition<_Request, _Response, _Request__Output, _Response__Output> + ServerStream: MethodDefinition<_Request, _Response, _Request__Output, _Response__Output> + Unary: MethodDefinition<_Request, _Response, _Request__Output, _Response__Output> +} diff --git a/packages/grpc-js/test/generated/test_service.ts b/packages/grpc-js/test/generated/test_service.ts new file mode 100644 index 000000000..364acddeb --- /dev/null +++ b/packages/grpc-js/test/generated/test_service.ts @@ -0,0 +1,15 @@ +import type * as grpc from '../../src/index'; +import type { MessageTypeDefinition } from '@grpc/proto-loader'; + +import type { TestServiceClient as _TestServiceClient, TestServiceDefinition as _TestServiceDefinition } from './TestService'; + +type SubtypeConstructor any, Subtype> = { + new(...args: ConstructorParameters): Subtype; +}; + +export interface ProtoGrpcType { + Request: MessageTypeDefinition + Response: MessageTypeDefinition + TestService: SubtypeConstructor & { service: _TestServiceDefinition } +} + diff --git a/packages/grpc-js/test/test-call-credentials.ts b/packages/grpc-js/test/test-call-credentials.ts index e952c5a10..007ed4847 100644 --- a/packages/grpc-js/test/test-call-credentials.ts +++ b/packages/grpc-js/test/test-call-credentials.ts @@ -86,21 +86,16 @@ describe('CallCredentials', () => { const callCredentials = 
CallCredentials.createFromMetadataGenerator( generateFromServiceURL ); - let metadata: Metadata; - try { - metadata = await callCredentials.generateMetadata({ - service_url: 'foo', - }); - } catch (err) { - throw err; - } + const metadata: Metadata = await callCredentials.generateMetadata({ + service_url: 'foo', + }); + assert.deepStrictEqual(metadata.get('service_url'), ['foo']); }); it('should emit an error if the associated metadataGenerator does', async () => { - const callCredentials = CallCredentials.createFromMetadataGenerator( - generateWithError - ); + const callCredentials = + CallCredentials.createFromMetadataGenerator(generateWithError); let metadata: Metadata | null = null; try { metadata = await callCredentials.generateMetadata({ service_url: '' }); @@ -112,14 +107,10 @@ describe('CallCredentials', () => { it('should combine metadata from multiple generators', async () => { const [callCreds1, callCreds2, callCreds3, callCreds4] = [ - 50, - 100, - 150, - 200, + 50, 100, 150, 200, ].map(ms => { - const generator: CallMetadataGenerator = makeAfterMsElapsedGenerator( - ms - ); + const generator: CallMetadataGenerator = + makeAfterMsElapsedGenerator(ms); return CallCredentials.createFromMetadataGenerator(generator); }); const testCases = [ @@ -147,12 +138,10 @@ describe('CallCredentials', () => { await Promise.all( testCases.map(async testCase => { const { credentials, expected } = testCase; - let metadata: Metadata; - try { - metadata = await credentials.generateMetadata({ service_url: '' }); - } catch (err) { - throw err; - } + const metadata: Metadata = await credentials.generateMetadata({ + service_url: '', + }); + assert.deepStrictEqual(metadata.get('msElapsed'), expected); }) ); diff --git a/packages/grpc-js/test/test-call-propagation.ts b/packages/grpc-js/test/test-call-propagation.ts index 3ce57be17..9ede91318 100644 --- a/packages/grpc-js/test/test-call-propagation.ts +++ b/packages/grpc-js/test/test-call-propagation.ts @@ -29,7 +29,7 @@ function multiDone(done: () => void, target: number) { if (count >= target) { done(); } - } + }; } describe('Call propagation', () => { @@ -39,33 +39,48 @@ describe('Call propagation', () => { let proxyServer: grpc.Server; let proxyClient: ServiceClient; - before((done) => { - Client = loadProtoFile(__dirname + '/fixtures/test_service.proto').TestService as ServiceClientConstructor; + before(done => { + Client = loadProtoFile(__dirname + '/fixtures/test_service.proto') + .TestService as ServiceClientConstructor; server = new grpc.Server(); server.addService(Client.service, { unary: () => {}, clientStream: () => {}, serverStream: () => {}, - bidiStream: () => {} + bidiStream: () => {}, }); proxyServer = new grpc.Server(); - server.bindAsync('localhost:0', grpc.ServerCredentials.createInsecure(), (error, port) => { - if (error) { - done(error); - return; - } - server.start(); - client = new Client(`localhost:${port}`, grpc.credentials.createInsecure()); - proxyServer.bindAsync('localhost:0', grpc.ServerCredentials.createInsecure(), (error, proxyPort) => { + server.bindAsync( + 'localhost:0', + grpc.ServerCredentials.createInsecure(), + (error, port) => { if (error) { done(error); return; } - proxyServer.start(); - proxyClient = new Client(`localhost:${proxyPort}`, grpc.credentials.createInsecure()); - done(); - }); - }); + server.start(); + client = new Client( + `localhost:${port}`, + grpc.credentials.createInsecure() + ); + proxyServer.bindAsync( + 'localhost:0', + grpc.ServerCredentials.createInsecure(), + (error, proxyPort) => { + if 
(error) { + done(error); + return; + } + proxyServer.start(); + proxyClient = new Client( + `localhost:${proxyPort}`, + grpc.credentials.createInsecure() + ); + done(); + } + ); + } + ); }); afterEach(() => { proxyServer.removeService(Client.service); @@ -75,63 +90,84 @@ describe('Call propagation', () => { proxyServer.forceShutdown(); }); describe('Cancellation', () => { - it('should work with unary requests', (done) => { + it('should work with unary requests', done => { done = multiDone(done, 2); + // eslint-disable-next-line prefer-const let call: grpc.ClientUnaryCall; proxyServer.addService(Client.service, { - unary: (parent: grpc.ServerUnaryCall, callback: grpc.sendUnaryData) => { - client.unary(parent.request, {parent: parent}, (error: grpc.ServiceError, value: unknown) => { - callback(error, value); - assert(error); - assert.strictEqual(error.code, grpc.status.CANCELLED); - done(); - }); + unary: ( + parent: grpc.ServerUnaryCall, + callback: grpc.sendUnaryData + ) => { + client.unary( + parent.request, + { parent: parent }, + (error: grpc.ServiceError, value: unknown) => { + callback(error, value); + assert(error); + assert.strictEqual(error.code, grpc.status.CANCELLED); + done(); + } + ); /* Cancel the original call after the server starts processing it to * ensure that it does reach the server. */ call.cancel(); - } - }); - call = proxyClient.unary({}, (error: grpc.ServiceError, value: unknown) => { - assert(error); - assert.strictEqual(error.code, grpc.status.CANCELLED); - done(); + }, }); + call = proxyClient.unary( + {}, + (error: grpc.ServiceError, value: unknown) => { + assert(error); + assert.strictEqual(error.code, grpc.status.CANCELLED); + done(); + } + ); }); - it('Should work with client streaming requests', (done) => { + it('Should work with client streaming requests', done => { done = multiDone(done, 2); + // eslint-disable-next-line prefer-const let call: grpc.ClientWritableStream; proxyServer.addService(Client.service, { - clientStream: (parent: grpc.ServerReadableStream, callback: grpc.sendUnaryData) => { - client.clientStream({parent: parent}, (error: grpc.ServiceError, value: unknown) => { - callback(error, value); - assert(error); - assert.strictEqual(error.code, grpc.status.CANCELLED); - done(); - }); + clientStream: ( + parent: grpc.ServerReadableStream, + callback: grpc.sendUnaryData + ) => { + client.clientStream( + { parent: parent }, + (error: grpc.ServiceError, value: unknown) => { + callback(error, value); + assert(error); + assert.strictEqual(error.code, grpc.status.CANCELLED); + done(); + } + ); /* Cancel the original call after the server starts processing it to * ensure that it does reach the server. 
*/ call.cancel(); - } - }); - call = proxyClient.clientStream((error: grpc.ServiceError, value: unknown) => { - assert(error); - assert.strictEqual(error.code, grpc.status.CANCELLED); - done(); + }, }); + call = proxyClient.clientStream( + (error: grpc.ServiceError, value: unknown) => { + assert(error); + assert.strictEqual(error.code, grpc.status.CANCELLED); + done(); + } + ); }); - it('Should work with server streaming requests', (done) => { + it('Should work with server streaming requests', done => { done = multiDone(done, 2); + // eslint-disable-next-line prefer-const let call: grpc.ClientReadableStream; proxyServer.addService(Client.service, { serverStream: (parent: grpc.ServerWritableStream) => { - const child = client.serverStream(parent.request, {parent: parent}); + const child = client.serverStream(parent.request, { parent: parent }); child.on('error', () => {}); child.on('status', (status: grpc.StatusObject) => { assert.strictEqual(status.code, grpc.status.CANCELLED); done(); }); call.cancel(); - } + }, }); call = proxyClient.serverStream({}); call.on('error', () => {}); @@ -140,19 +176,20 @@ describe('Call propagation', () => { done(); }); }); - it('Should work with bidi streaming requests', (done) => { + it('Should work with bidi streaming requests', done => { done = multiDone(done, 2); + // eslint-disable-next-line prefer-const let call: grpc.ClientDuplexStream; proxyServer.addService(Client.service, { bidiStream: (parent: grpc.ServerDuplexStream) => { - const child = client.bidiStream({parent: parent}); + const child = client.bidiStream({ parent: parent }); child.on('error', () => {}); child.on('status', (status: grpc.StatusObject) => { assert.strictEqual(status.code, grpc.status.CANCELLED); done(); }); call.cancel(); - } + }, }); call = proxyClient.bidiStream(); call.on('error', () => {}); @@ -163,86 +200,113 @@ describe('Call propagation', () => { }); }); describe('Deadlines', () => { - it('should work with unary requests', (done) => { + it('should work with unary requests', done => { done = multiDone(done, 2); - let call: grpc.ClientUnaryCall; proxyServer.addService(Client.service, { - unary: (parent: grpc.ServerUnaryCall, callback: grpc.sendUnaryData) => { - client.unary(parent.request, {parent: parent, propagate_flags: grpc.propagate.DEADLINE}, (error: grpc.ServiceError, value: unknown) => { - callback(error, value); - assert(error); - assert.strictEqual(error.code, grpc.status.DEADLINE_EXCEEDED); - done(); - }); - } + unary: ( + parent: grpc.ServerUnaryCall, + callback: grpc.sendUnaryData + ) => { + client.unary( + parent.request, + { parent: parent, propagate_flags: grpc.propagate.DEADLINE }, + (error: grpc.ServiceError, value: unknown) => { + callback(error, value); + assert(error); + assert.strictEqual(error.code, grpc.status.DEADLINE_EXCEEDED); + done(); + } + ); + }, }); const deadline = new Date(); deadline.setMilliseconds(deadline.getMilliseconds() + 100); - call = proxyClient.unary({}, {deadline}, (error: grpc.ServiceError, value: unknown) => { - assert(error); - assert.strictEqual(error.code, grpc.status.DEADLINE_EXCEEDED); - done(); - }); + proxyClient.unary( + {}, + { deadline }, + (error: grpc.ServiceError, value: unknown) => { + assert(error); + assert.strictEqual(error.code, grpc.status.DEADLINE_EXCEEDED); + done(); + } + ); }); - it('Should work with client streaming requests', (done) => { + it('Should work with client streaming requests', done => { done = multiDone(done, 2); - let call: grpc.ClientWritableStream; + proxyServer.addService(Client.service, 
{ - clientStream: (parent: grpc.ServerReadableStream, callback: grpc.sendUnaryData) => { - client.clientStream({parent: parent, propagate_flags: grpc.propagate.DEADLINE}, (error: grpc.ServiceError, value: unknown) => { - callback(error, value); - assert(error); - assert.strictEqual(error.code, grpc.status.DEADLINE_EXCEEDED); - done(); - }); - } + clientStream: ( + parent: grpc.ServerReadableStream, + callback: grpc.sendUnaryData + ) => { + client.clientStream( + { parent: parent, propagate_flags: grpc.propagate.DEADLINE }, + (error: grpc.ServiceError, value: unknown) => { + callback(error, value); + assert(error); + assert.strictEqual(error.code, grpc.status.DEADLINE_EXCEEDED); + done(); + } + ); + }, }); const deadline = new Date(); deadline.setMilliseconds(deadline.getMilliseconds() + 100); - call = proxyClient.clientStream({deadline, propagate_flags: grpc.propagate.DEADLINE}, (error: grpc.ServiceError, value: unknown) => { - assert(error); - assert.strictEqual(error.code, grpc.status.DEADLINE_EXCEEDED); - done(); - }); + proxyClient.clientStream( + { deadline, propagate_flags: grpc.propagate.DEADLINE }, + (error: grpc.ServiceError, value: unknown) => { + assert(error); + assert.strictEqual(error.code, grpc.status.DEADLINE_EXCEEDED); + done(); + } + ); }); - it('Should work with server streaming requests', (done) => { + it('Should work with server streaming requests', done => { done = multiDone(done, 2); let call: grpc.ClientReadableStream; proxyServer.addService(Client.service, { serverStream: (parent: grpc.ServerWritableStream) => { - const child = client.serverStream(parent.request, {parent: parent, propagate_flags: grpc.propagate.DEADLINE}); + const child = client.serverStream(parent.request, { + parent: parent, + propagate_flags: grpc.propagate.DEADLINE, + }); child.on('error', () => {}); child.on('status', (status: grpc.StatusObject) => { assert.strictEqual(status.code, grpc.status.DEADLINE_EXCEEDED); done(); }); - } + }, }); const deadline = new Date(); deadline.setMilliseconds(deadline.getMilliseconds() + 100); - call = proxyClient.serverStream({}, {deadline}); + // eslint-disable-next-line prefer-const + call = proxyClient.serverStream({}, { deadline }); call.on('error', () => {}); call.on('status', (status: grpc.StatusObject) => { assert.strictEqual(status.code, grpc.status.DEADLINE_EXCEEDED); done(); }); }); - it('Should work with bidi streaming requests', (done) => { + it('Should work with bidi streaming requests', done => { done = multiDone(done, 2); let call: grpc.ClientDuplexStream; proxyServer.addService(Client.service, { bidiStream: (parent: grpc.ServerDuplexStream) => { - const child = client.bidiStream({parent: parent, propagate_flags: grpc.propagate.DEADLINE}); + const child = client.bidiStream({ + parent: parent, + propagate_flags: grpc.propagate.DEADLINE, + }); child.on('error', () => {}); child.on('status', (status: grpc.StatusObject) => { assert.strictEqual(status.code, grpc.status.DEADLINE_EXCEEDED); done(); }); - } + }, }); const deadline = new Date(); deadline.setMilliseconds(deadline.getMilliseconds() + 100); - call = proxyClient.bidiStream({deadline}); + // eslint-disable-next-line prefer-const + call = proxyClient.bidiStream({ deadline }); call.on('error', () => {}); call.on('status', (status: grpc.StatusObject) => { assert.strictEqual(status.code, grpc.status.DEADLINE_EXCEEDED); @@ -250,4 +314,4 @@ describe('Call propagation', () => { }); }); }); -}); \ No newline at end of file +}); diff --git a/packages/grpc-js/test/test-channel-credentials.ts 
b/packages/grpc-js/test/test-channel-credentials.ts index d6028f469..b5c011581 100644 --- a/packages/grpc-js/test/test-channel-credentials.ts +++ b/packages/grpc-js/test/test-channel-credentials.ts @@ -17,12 +17,20 @@ import * as assert from 'assert'; import * as fs from 'fs'; +import * as path from 'path'; import { promisify } from 'util'; import { CallCredentials } from '../src/call-credentials'; import { ChannelCredentials } from '../src/channel-credentials'; +import * as grpc from '../src'; +import { ServiceClient, ServiceClientConstructor } from '../src/make-client'; -import { assert2, mockFunction } from './common'; +import { assert2, loadProtoFile, mockFunction } from './common'; +import { sendUnaryData, ServerUnaryCall, ServiceError } from '../src'; + +const protoFile = path.join(__dirname, 'fixtures', 'echo_service.proto'); +const echoService = loadProtoFile(protoFile) + .EchoService as ServiceClientConstructor; class CallCredentialsMock implements CallCredentials { child: CallCredentialsMock | null = null; @@ -138,3 +146,91 @@ describe('ChannelCredentials Implementation', () => { }); }); }); + +describe('ChannelCredentials usage', () => { + let client: ServiceClient; + let server: grpc.Server; + let portNum: number; + let caCert: Buffer; + const hostnameOverride = 'foo.test.google.fr'; + before(async () => { + const { ca, key, cert } = await pFixtures; + caCert = ca; + const serverCreds = grpc.ServerCredentials.createSsl(null, [ + { private_key: key, cert_chain: cert }, + ]); + const channelCreds = ChannelCredentials.createSsl(ca); + const callCreds = CallCredentials.createFromMetadataGenerator( + (options, cb) => { + const metadata = new grpc.Metadata(); + metadata.set('test-key', 'test-value'); + cb(null, metadata); + } + ); + const combinedCreds = channelCreds.compose(callCreds); + return new Promise((resolve, reject) => { + server = new grpc.Server(); + server.addService(echoService.service, { + echo(call: ServerUnaryCall, callback: sendUnaryData) { + call.sendMetadata(call.metadata); + callback(null, call.request); + }, + }); + + server.bindAsync('localhost:0', serverCreds, (err, port) => { + if (err) { + reject(err); + return; + } + portNum = port; + client = new echoService(`localhost:${port}`, combinedCreds, { + 'grpc.ssl_target_name_override': hostnameOverride, + 'grpc.default_authority': hostnameOverride, + }); + server.start(); + resolve(); + }); + }); + }); + after(() => { + server.forceShutdown(); + }); + + it('Should send the metadata from call credentials attached to channel credentials', done => { + const call = client.echo( + { value: 'test value', value2: 3 }, + assert2.mustCall((error: ServiceError, response: any) => { + assert.ifError(error); + assert.deepStrictEqual(response, { value: 'test value', value2: 3 }); + }) + ); + call.on( + 'metadata', + assert2.mustCall((metadata: grpc.Metadata) => { + assert.deepStrictEqual(metadata.get('test-key'), ['test-value']); + }) + ); + assert2.afterMustCallsSatisfied(done); + }); + + it('Should call the checkServerIdentity callback', done => { + const channelCreds = ChannelCredentials.createSsl(caCert, null, null, { + checkServerIdentity: assert2.mustCall((hostname, cert) => { + assert.strictEqual(hostname, hostnameOverride); + return undefined; + }), + }); + const client = new echoService(`localhost:${portNum}`, channelCreds, { + 'grpc.ssl_target_name_override': hostnameOverride, + 'grpc.default_authority': hostnameOverride, + }); + client.echo( + { value: 'test value', value2: 3 }, + assert2.mustCall((error: 
ServiceError, response: any) => { + assert.ifError(error); + assert.deepStrictEqual(response, { value: 'test value', value2: 3 }); + }) + ); + assert2.afterMustCallsSatisfied(done); + }); +}); diff --git a/packages/grpc-js/test/test-channelz.ts b/packages/grpc-js/test/test-channelz.ts new file mode 100644 index 000000000..5c3dc3d7d --- /dev/null +++ b/packages/grpc-js/test/test-channelz.ts @@ -0,0 +1,574 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import * as assert from 'assert'; +import * as protoLoader from '@grpc/proto-loader'; +import * as grpc from '../src'; + +import { ProtoGrpcType } from '../src/generated/channelz'; +import { ChannelzClient } from '../src/generated/grpc/channelz/v1/Channelz'; +import { ServiceClient, ServiceClientConstructor } from '../src/make-client'; +import { loadProtoFile } from './common'; + +const loadedChannelzProto = protoLoader.loadSync('channelz.proto', { + keepCase: true, + longs: String, + enums: String, + defaults: true, + oneofs: true, + includeDirs: [`${__dirname}/../../proto`], +}); +const channelzGrpcObject = grpc.loadPackageDefinition( + loadedChannelzProto +) as unknown as ProtoGrpcType; + +const TestServiceClient = loadProtoFile( + `${__dirname}/fixtures/test_service.proto` +).TestService as ServiceClientConstructor; + +const testServiceImpl: grpc.UntypedServiceImplementation = { + unary( + call: grpc.ServerUnaryCall, + callback: grpc.sendUnaryData + ) { + if (call.request.error) { + setTimeout(() => { + callback({ + code: grpc.status.INVALID_ARGUMENT, + details: call.request.message, + }); + }, call.request.errorAfter); + } else { + callback(null, { count: 1 }); + } + }, +}; + +describe('Channelz', () => { + let channelzServer: grpc.Server; + let channelzClient: ChannelzClient; + let testServer: grpc.Server; + let testClient: ServiceClient; + + before(done => { + channelzServer = new grpc.Server(); + channelzServer.addService( + grpc.getChannelzServiceDefinition(), + grpc.getChannelzHandlers() + ); + channelzServer.bindAsync( + 'localhost:0', + grpc.ServerCredentials.createInsecure(), + (error, port) => { + if (error) { + done(error); + return; + } + channelzServer.start(); + channelzClient = new channelzGrpcObject.grpc.channelz.v1.Channelz( + `localhost:${port}`, + grpc.credentials.createInsecure() + ); + done(); + } + ); + }); + + after(() => { + channelzClient.close(); + channelzServer.forceShutdown(); + }); + + beforeEach(done => { + testServer = new grpc.Server(); + testServer.addService(TestServiceClient.service, testServiceImpl); + testServer.bindAsync( + 'localhost:0', + grpc.ServerCredentials.createInsecure(), + (error, port) => { + if (error) { + done(error); + return; + } + testServer.start(); + testClient = new TestServiceClient( + `localhost:${port}`, + grpc.credentials.createInsecure() + ); + done(); + } + ); + }); + + afterEach(() => { + testClient.close(); + testServer.forceShutdown(); + }); + + it('should see a newly created channel', done => { + // Test that the 
specific test client channel info can be retrieved + channelzClient.GetChannel( + { channel_id: testClient.getChannel().getChannelzRef().id }, + (error, result) => { + assert.ifError(error); + assert(result); + assert(result.channel); + assert(result.channel.ref); + assert.strictEqual( + +result.channel.ref.channel_id, + testClient.getChannel().getChannelzRef().id + ); + // Test that the channel is in the list of top channels + channelzClient.getTopChannels( + { + start_channel_id: testClient.getChannel().getChannelzRef().id, + max_results: 1, + }, + (error, result) => { + assert.ifError(error); + assert(result); + assert.strictEqual(result.channel.length, 1); + assert(result.channel[0].ref); + assert.strictEqual( + +result.channel[0].ref.channel_id, + testClient.getChannel().getChannelzRef().id + ); + done(); + } + ); + } + ); + }); + + it('should see a newly created server', done => { + // Test that the specific test server info can be retrieved + channelzClient.getServer( + { server_id: testServer.getChannelzRef().id }, + (error, result) => { + assert.ifError(error); + assert(result); + assert(result.server); + assert(result.server.ref); + assert.strictEqual( + +result.server.ref.server_id, + testServer.getChannelzRef().id + ); + // Test that the server is in the list of servers + channelzClient.getServers( + { start_server_id: testServer.getChannelzRef().id, max_results: 1 }, + (error, result) => { + assert.ifError(error); + assert(result); + assert.strictEqual(result.server.length, 1); + assert(result.server[0].ref); + assert.strictEqual( + +result.server[0].ref.server_id, + testServer.getChannelzRef().id + ); + done(); + } + ); + } + ); + }); + + it('should count successful calls', done => { + testClient.unary({}, (error: grpc.ServiceError, value: unknown) => { + assert.ifError(error); + // Channel data tests + channelzClient.GetChannel( + { channel_id: testClient.getChannel().getChannelzRef().id }, + (error, channelResult) => { + assert.ifError(error); + assert(channelResult); + assert(channelResult.channel); + assert(channelResult.channel.ref); + assert(channelResult.channel.data); + assert.strictEqual(+channelResult.channel.data.calls_started, 1); + assert.strictEqual(+channelResult.channel.data.calls_succeeded, 1); + assert.strictEqual(+channelResult.channel.data.calls_failed, 0); + assert.strictEqual(channelResult.channel.subchannel_ref.length, 1); + channelzClient.getSubchannel( + { + subchannel_id: + channelResult.channel.subchannel_ref[0].subchannel_id, + }, + (error, subchannelResult) => { + assert.ifError(error); + assert(subchannelResult); + assert(subchannelResult.subchannel); + assert(subchannelResult.subchannel.ref); + assert(subchannelResult.subchannel.data); + assert.strictEqual( + subchannelResult.subchannel.ref.subchannel_id, + channelResult.channel!.subchannel_ref[0].subchannel_id + ); + assert.strictEqual( + +subchannelResult.subchannel.data.calls_started, + 1 + ); + assert.strictEqual( + +subchannelResult.subchannel.data.calls_succeeded, + 1 + ); + assert.strictEqual( + +subchannelResult.subchannel.data.calls_failed, + 0 + ); + assert.strictEqual( + subchannelResult.subchannel.socket_ref.length, + 1 + ); + channelzClient.getSocket( + { + socket_id: + subchannelResult.subchannel.socket_ref[0].socket_id, + }, + (error, socketResult) => { + assert.ifError(error); + assert(socketResult); + assert(socketResult.socket); + assert(socketResult.socket.ref); + assert(socketResult.socket.data); + assert.strictEqual( + socketResult.socket.ref.socket_id, + 
subchannelResult.subchannel!.socket_ref[0].socket_id + ); + assert.strictEqual( + +socketResult.socket.data.streams_started, + 1 + ); + assert.strictEqual( + +socketResult.socket.data.streams_succeeded, + 1 + ); + assert.strictEqual( + +socketResult.socket.data.streams_failed, + 0 + ); + assert.strictEqual( + +socketResult.socket.data.messages_received, + 1 + ); + assert.strictEqual( + +socketResult.socket.data.messages_sent, + 1 + ); + // Server data tests + channelzClient.getServer( + { server_id: testServer.getChannelzRef().id }, + (error, serverResult) => { + assert.ifError(error); + assert(serverResult); + assert(serverResult.server); + assert(serverResult.server.ref); + assert(serverResult.server.data); + assert.strictEqual( + +serverResult.server.ref.server_id, + testServer.getChannelzRef().id + ); + assert.strictEqual( + +serverResult.server.data.calls_started, + 1 + ); + assert.strictEqual( + +serverResult.server.data.calls_succeeded, + 1 + ); + assert.strictEqual( + +serverResult.server.data.calls_failed, + 0 + ); + channelzClient.getServerSockets( + { server_id: testServer.getChannelzRef().id }, + (error, socketsResult) => { + assert.ifError(error); + assert(socketsResult); + assert.strictEqual( + socketsResult.socket_ref.length, + 1 + ); + channelzClient.getSocket( + { + socket_id: socketsResult.socket_ref[0].socket_id, + }, + (error, serverSocketResult) => { + assert.ifError(error); + assert(serverSocketResult); + assert(serverSocketResult.socket); + assert(serverSocketResult.socket.ref); + assert(serverSocketResult.socket.data); + assert.strictEqual( + serverSocketResult.socket.ref.socket_id, + socketsResult.socket_ref[0].socket_id + ); + assert.strictEqual( + +serverSocketResult.socket.data.streams_started, + 1 + ); + assert.strictEqual( + +serverSocketResult.socket.data + .streams_succeeded, + 1 + ); + assert.strictEqual( + +serverSocketResult.socket.data.streams_failed, + 0 + ); + assert.strictEqual( + +serverSocketResult.socket.data + .messages_received, + 1 + ); + assert.strictEqual( + +serverSocketResult.socket.data.messages_sent, + 1 + ); + done(); + } + ); + } + ); + } + ); + } + ); + } + ); + } + ); + }); + }); + + it('should count failed calls', done => { + testClient.unary( + { error: true }, + (error: grpc.ServiceError, value: unknown) => { + assert(error); + // Channel data tests + channelzClient.GetChannel( + { channel_id: testClient.getChannel().getChannelzRef().id }, + (error, channelResult) => { + assert.ifError(error); + assert(channelResult); + assert(channelResult.channel); + assert(channelResult.channel.ref); + assert(channelResult.channel.data); + assert.strictEqual(+channelResult.channel.data.calls_started, 1); + assert.strictEqual(+channelResult.channel.data.calls_succeeded, 0); + assert.strictEqual(+channelResult.channel.data.calls_failed, 1); + assert.strictEqual(channelResult.channel.subchannel_ref.length, 1); + channelzClient.getSubchannel( + { + subchannel_id: + channelResult.channel.subchannel_ref[0].subchannel_id, + }, + (error, subchannelResult) => { + assert.ifError(error); + assert(subchannelResult); + assert(subchannelResult.subchannel); + assert(subchannelResult.subchannel.ref); + assert(subchannelResult.subchannel.data); + assert.strictEqual( + subchannelResult.subchannel.ref.subchannel_id, + channelResult.channel!.subchannel_ref[0].subchannel_id + ); + assert.strictEqual( + +subchannelResult.subchannel.data.calls_started, + 1 + ); + assert.strictEqual( + +subchannelResult.subchannel.data.calls_succeeded, + 0 + ); + assert.strictEqual( + 
+subchannelResult.subchannel.data.calls_failed, + 1 + ); + assert.strictEqual( + subchannelResult.subchannel.socket_ref.length, + 1 + ); + channelzClient.getSocket( + { + socket_id: + subchannelResult.subchannel.socket_ref[0].socket_id, + }, + (error, socketResult) => { + assert.ifError(error); + assert(socketResult); + assert(socketResult.socket); + assert(socketResult.socket.ref); + assert(socketResult.socket.data); + assert.strictEqual( + socketResult.socket.ref.socket_id, + subchannelResult.subchannel!.socket_ref[0].socket_id + ); + assert.strictEqual( + +socketResult.socket.data.streams_started, + 1 + ); + assert.strictEqual( + +socketResult.socket.data.streams_succeeded, + 1 + ); + assert.strictEqual( + +socketResult.socket.data.streams_failed, + 0 + ); + assert.strictEqual( + +socketResult.socket.data.messages_received, + 0 + ); + assert.strictEqual( + +socketResult.socket.data.messages_sent, + 1 + ); + // Server data tests + channelzClient.getServer( + { server_id: testServer.getChannelzRef().id }, + (error, serverResult) => { + assert.ifError(error); + assert(serverResult); + assert(serverResult.server); + assert(serverResult.server.ref); + assert(serverResult.server.data); + assert.strictEqual( + +serverResult.server.ref.server_id, + testServer.getChannelzRef().id + ); + assert.strictEqual( + +serverResult.server.data.calls_started, + 1 + ); + assert.strictEqual( + +serverResult.server.data.calls_succeeded, + 0 + ); + assert.strictEqual( + +serverResult.server.data.calls_failed, + 1 + ); + channelzClient.getServerSockets( + { server_id: testServer.getChannelzRef().id }, + (error, socketsResult) => { + assert.ifError(error); + assert(socketsResult); + assert.strictEqual( + socketsResult.socket_ref.length, + 1 + ); + channelzClient.getSocket( + { + socket_id: + socketsResult.socket_ref[0].socket_id, + }, + (error, serverSocketResult) => { + assert.ifError(error); + assert(serverSocketResult); + assert(serverSocketResult.socket); + assert(serverSocketResult.socket.ref); + assert(serverSocketResult.socket.data); + assert.strictEqual( + serverSocketResult.socket.ref.socket_id, + socketsResult.socket_ref[0].socket_id + ); + assert.strictEqual( + +serverSocketResult.socket.data + .streams_started, + 1 + ); + assert.strictEqual( + +serverSocketResult.socket.data + .streams_succeeded, + 1 + ); + assert.strictEqual( + +serverSocketResult.socket.data + .streams_failed, + 0 + ); + assert.strictEqual( + +serverSocketResult.socket.data + .messages_received, + 1 + ); + assert.strictEqual( + +serverSocketResult.socket.data.messages_sent, + 0 + ); + done(); + } + ); + } + ); + } + ); + } + ); + } + ); + } + ); + } + ); + }); +}); + +describe('Disabling channelz', () => { + let testServer: grpc.Server; + let testClient: ServiceClient; + beforeEach(done => { + testServer = new grpc.Server({ 'grpc.enable_channelz': 0 }); + testServer.addService(TestServiceClient.service, testServiceImpl); + testServer.bindAsync( + 'localhost:0', + grpc.ServerCredentials.createInsecure(), + (error, port) => { + if (error) { + done(error); + return; + } + testServer.start(); + testClient = new TestServiceClient( + `localhost:${port}`, + grpc.credentials.createInsecure(), + { 'grpc.enable_channelz': 0 } + ); + done(); + } + ); + }); + + afterEach(() => { + testClient.close(); + testServer.forceShutdown(); + }); + + it('Should still work', done => { + const deadline = new Date(); + deadline.setSeconds(deadline.getSeconds() + 1); + testClient.unary( + {}, + { deadline }, + (error: grpc.ServiceError, value: unknown) => { 
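+ // With channelz disabled via 'grpc.enable_channelz': 0 on both the client
+ // and the server, the call itself should still complete successfully.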
+ assert.ifError(error); + done(); + } + ); + }); +}); diff --git a/packages/grpc-js/test/test-client.ts b/packages/grpc-js/test/test-client.ts index 0d2878cbc..67b396015 100644 --- a/packages/grpc-js/test/test-client.ts +++ b/packages/grpc-js/test/test-client.ts @@ -20,7 +20,7 @@ import * as assert from 'assert'; import * as grpc from '../src'; import { Server, ServerCredentials } from '../src'; import { Client } from '../src'; -import { ConnectivityState } from '../src/channel'; +import { ConnectivityState } from '../src/connectivity-state'; const clientInsecureCreds = grpc.credentials.createInsecure(); const serverInsecureCreds = ServerCredentials.createInsecure(); @@ -32,19 +32,12 @@ describe('Client', () => { before(done => { server = new Server(); - server.bindAsync( - 'localhost:0', - serverInsecureCreds, - (err, port) => { - assert.ifError(err); - client = new Client( - `localhost:${port}`, - clientInsecureCreds - ); - server.start(); - done(); - } - ); + server.bindAsync('localhost:0', serverInsecureCreds, (err, port) => { + assert.ifError(err); + client = new Client(`localhost:${port}`, clientInsecureCreds); + server.start(); + done(); + }); }); after(done => { @@ -79,16 +72,65 @@ describe('Client without a server', () => { after(() => { client.close(); }); - it('should fail multiple calls to the nonexistent server', done => { + it('should fail multiple calls to the nonexistent server', function (done) { + this.timeout(5000); // Regression test for https://github.com/grpc/grpc-node/issues/1411 - client.makeUnaryRequest('/service/method', x => x, x => x, Buffer.from([]), (error, value) => { - assert(error); - assert.strictEqual(error?.code, grpc.status.UNAVAILABLE); - client.makeUnaryRequest('/service/method', x => x, x => x, Buffer.from([]), (error, value) => { + client.makeUnaryRequest( + '/service/method', + x => x, + x => x, + Buffer.from([]), + (error, value) => { assert(error); assert.strictEqual(error?.code, grpc.status.UNAVAILABLE); - done(); - }); - }); + client.makeUnaryRequest( + '/service/method', + x => x, + x => x, + Buffer.from([]), + (error, value) => { + assert(error); + assert.strictEqual(error?.code, grpc.status.UNAVAILABLE); + done(); + } + ); + } + ); + }); +}); + +describe('Client with a nonexistent target domain', () => { + let client: Client; + before(() => { + // DNS name that does not exist per RFC 6761 section 6.4 + client = new Client('host.invalid', clientInsecureCreds); }); -}); \ No newline at end of file + after(() => { + client.close(); + }); + it('should fail multiple calls', function (done) { + this.timeout(5000); + // Regression test for https://github.com/grpc/grpc-node/issues/1411 + client.makeUnaryRequest( + '/service/method', + x => x, + x => x, + Buffer.from([]), + (error, value) => { + assert(error); + assert.strictEqual(error?.code, grpc.status.UNAVAILABLE); + client.makeUnaryRequest( + '/service/method', + x => x, + x => x, + Buffer.from([]), + (error, value) => { + assert(error); + assert.strictEqual(error?.code, grpc.status.UNAVAILABLE); + done(); + } + ); + } + ); + }); +}); diff --git a/packages/grpc-js/test/test-confg-parsing.ts b/packages/grpc-js/test/test-confg-parsing.ts new file mode 100644 index 000000000..b5b9832a7 --- /dev/null +++ b/packages/grpc-js/test/test-confg-parsing.ts @@ -0,0 +1,217 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import { experimental } from '../src'; +import * as assert from 'assert'; +import parseLoadBalancingConfig = experimental.parseLoadBalancingConfig; + +/** + * Describes a test case for config parsing. input is passed to + * parseLoadBalancingConfig. If error is set, the expectation is that that + * operation throws an error with a matching message. Otherwise, toJsonObject + * is called on the result, and it is expected to match output, or input if + * output is unset. + */ +interface TestCase { + name: string; + input: object; + output?: object; + error?: RegExp; +} + +/* The main purpose of these tests is to verify that configs that are expected + * to be valid parse successfully, and configs that are expected to be invalid + * throw errors. The specific output of this parsing is a lower priority + * concern. + * Note: some tests have an expected output that is different from the output, + * but all non-error tests additionally verify that parsing the output again + * produces the same output. */ +const allTestCases: { [lbPolicyName: string]: TestCase[] } = { + pick_first: [ + { + name: 'no fields set', + input: {}, + output: { + shuffleAddressList: false, + }, + }, + { + name: 'shuffleAddressList set', + input: { + shuffleAddressList: true, + }, + }, + ], + round_robin: [ + { + name: 'no fields set', + input: {}, + }, + ], + outlier_detection: [ + { + name: 'only required fields set', + input: { + child_policy: [{ round_robin: {} }], + }, + output: { + interval: { + seconds: 10, + nanos: 0, + }, + base_ejection_time: { + seconds: 30, + nanos: 0, + }, + max_ejection_time: { + seconds: 300, + nanos: 0, + }, + max_ejection_percent: 10, + success_rate_ejection: undefined, + failure_percentage_ejection: undefined, + child_policy: [{ round_robin: {} }], + }, + }, + { + name: 'all optional fields undefined', + input: { + interval: undefined, + base_ejection_time: undefined, + max_ejection_time: undefined, + max_ejection_percent: undefined, + success_rate_ejection: undefined, + failure_percentage_ejection: undefined, + child_policy: [{ round_robin: {} }], + }, + output: { + interval: { + seconds: 10, + nanos: 0, + }, + base_ejection_time: { + seconds: 30, + nanos: 0, + }, + max_ejection_time: { + seconds: 300, + nanos: 0, + }, + max_ejection_percent: 10, + success_rate_ejection: undefined, + failure_percentage_ejection: undefined, + child_policy: [{ round_robin: {} }], + }, + }, + { + name: 'empty ejection configs', + input: { + success_rate_ejection: {}, + failure_percentage_ejection: {}, + child_policy: [{ round_robin: {} }], + }, + output: { + interval: { + seconds: 10, + nanos: 0, + }, + base_ejection_time: { + seconds: 30, + nanos: 0, + }, + max_ejection_time: { + seconds: 300, + nanos: 0, + }, + max_ejection_percent: 10, + success_rate_ejection: { + stdev_factor: 1900, + enforcement_percentage: 100, + minimum_hosts: 5, + request_volume: 100, + }, + failure_percentage_ejection: { + threshold: 85, + enforcement_percentage: 100, + minimum_hosts: 5, + request_volume: 50, + }, + child_policy: [{ round_robin: {} }], + }, + }, + { + name: 'all fields populated', + 
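+ // No 'output' specified: the parsed config is expected to round-trip
+ // to exactly this input.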
input: { + interval: { + seconds: 20, + nanos: 0, + }, + base_ejection_time: { + seconds: 40, + nanos: 0, + }, + max_ejection_time: { + seconds: 400, + nanos: 0, + }, + max_ejection_percent: 20, + success_rate_ejection: { + stdev_factor: 1800, + enforcement_percentage: 90, + minimum_hosts: 4, + request_volume: 200, + }, + failure_percentage_ejection: { + threshold: 95, + enforcement_percentage: 90, + minimum_hosts: 4, + request_volume: 60, + }, + child_policy: [{ round_robin: {} }], + }, + }, + ], +}; + +describe('Load balancing policy config parsing', () => { + for (const [lbPolicyName, testCases] of Object.entries(allTestCases)) { + describe(lbPolicyName, () => { + for (const testCase of testCases) { + it(testCase.name, () => { + const lbConfigInput = { [lbPolicyName]: testCase.input }; + if (testCase.error) { + assert.throws(() => { + parseLoadBalancingConfig(lbConfigInput); + }, testCase.error); + } else { + const expectedOutput = testCase.output ?? testCase.input; + const parsedJson = + parseLoadBalancingConfig(lbConfigInput).toJsonObject(); + assert.deepStrictEqual(parsedJson, { + [lbPolicyName]: expectedOutput, + }); + // Test idempotency + assert.deepStrictEqual( + parseLoadBalancingConfig(parsedJson).toJsonObject(), + parsedJson + ); + } + }); + } + }); + } +}); diff --git a/packages/grpc-js/test/test-deadline.ts b/packages/grpc-js/test/test-deadline.ts new file mode 100644 index 000000000..24aebd4d7 --- /dev/null +++ b/packages/grpc-js/test/test-deadline.ts @@ -0,0 +1,93 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +import * as assert from 'assert'; + +import * as grpc from '../src'; +import { ServiceClient, ServiceClientConstructor } from '../src/make-client'; +import { loadProtoFile } from './common'; + +const TIMEOUT_SERVICE_CONFIG: grpc.ServiceConfig = { + loadBalancingConfig: [], + methodConfig: [ + { + name: [{ service: 'TestService' }], + timeout: { + seconds: 1, + nanos: 0, + }, + }, + ], +}; + +describe('Client with configured timeout', () => { + let server: grpc.Server; + let Client: ServiceClientConstructor; + let client: ServiceClient; + + before(done => { + Client = loadProtoFile(__dirname + '/fixtures/test_service.proto') + .TestService as ServiceClientConstructor; + server = new grpc.Server(); + server.addService(Client.service, { + unary: () => {}, + clientStream: () => {}, + serverStream: () => {}, + bidiStream: () => {}, + }); + server.bindAsync( + 'localhost:0', + grpc.ServerCredentials.createInsecure(), + (error, port) => { + if (error) { + done(error); + return; + } + server.start(); + client = new Client( + `localhost:${port}`, + grpc.credentials.createInsecure(), + { 'grpc.service_config': JSON.stringify(TIMEOUT_SERVICE_CONFIG) } + ); + done(); + } + ); + }); + + after(done => { + client.close(); + server.tryShutdown(done); + }); + + it('Should end calls without explicit deadline with DEADLINE_EXCEEDED', done => { + client.unary({}, (error: grpc.ServiceError, value: unknown) => { + assert(error); + assert.strictEqual(error.code, grpc.status.DEADLINE_EXCEEDED); + done(); + }); + }); + + it('Should end calls with a long explicit deadline with DEADLINE_EXCEEDED', done => { + const deadline = new Date(); + deadline.setSeconds(deadline.getSeconds() + 20); + client.unary({}, (error: grpc.ServiceError, value: unknown) => { + assert(error); + assert.strictEqual(error.code, grpc.status.DEADLINE_EXCEEDED); + done(); + }); + }); +}); diff --git a/packages/grpc-js/test/test-global-subchannel-pool.ts b/packages/grpc-js/test/test-global-subchannel-pool.ts new file mode 100644 index 000000000..f49221446 --- /dev/null +++ b/packages/grpc-js/test/test-global-subchannel-pool.ts @@ -0,0 +1,149 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +import * as assert from 'assert'; +import * as path from 'path'; + +import * as grpc from '../src'; +import { + sendUnaryData, + Server, + ServerCredentials, + ServerUnaryCall, + ServiceClientConstructor, + ServiceError, +} from '../src'; + +import { loadProtoFile } from './common'; + +const protoFile = path.join(__dirname, 'fixtures', 'echo_service.proto'); +const echoService = loadProtoFile(protoFile) + .EchoService as ServiceClientConstructor; + +describe('Global subchannel pool', () => { + let server: Server; + let serverPort: number; + + let client1: InstanceType; + let client2: InstanceType; + + let promises: Promise[]; + + before(done => { + server = new Server(); + server.addService(echoService.service, { + echo(call: ServerUnaryCall, callback: sendUnaryData) { + callback(null, call.request); + }, + }); + + server.bindAsync( + 'localhost:0', + ServerCredentials.createInsecure(), + (err, port) => { + assert.ifError(err); + serverPort = port; + server.start(); + done(); + } + ); + }); + + beforeEach(() => { + promises = []; + }); + + after(done => { + server.tryShutdown(done); + }); + + function callService(client: InstanceType) { + return new Promise(resolve => { + const request = { value: 'test value', value2: 3 }; + + client.echo(request, (error: ServiceError, response: any) => { + assert.ifError(error); + assert.deepStrictEqual(response, request); + resolve(); + }); + }); + } + + function connect() { + const grpcOptions = { + 'grpc.use_local_subchannel_pool': 0, + }; + + client1 = new echoService( + `127.0.0.1:${serverPort}`, + grpc.credentials.createInsecure(), + grpcOptions + ); + + client2 = new echoService( + `127.0.0.1:${serverPort}`, + grpc.credentials.createInsecure(), + grpcOptions + ); + } + + /* This is a regression test for a bug where client1.close in the + * waitForReady callback would cause the subchannel to transition to IDLE + * even though client2 is also using it. */ + it('Should handle client.close calls in waitForReady', done => { + connect(); + + promises.push( + new Promise(resolve => { + client1.waitForReady(Date.now() + 50, error => { + assert.ifError(error); + client1.close(); + resolve(); + }); + }) + ); + + promises.push( + new Promise(resolve => { + client2.waitForReady(Date.now() + 50, error => { + assert.ifError(error); + resolve(); + }); + }) + ); + + Promise.all(promises).then(() => { + done(); + }); + }); + + it('Call the service', done => { + promises.push(callService(client2)); + + Promise.all(promises).then(() => { + done(); + }); + }); + + it('Should complete the client lifecycle without error', done => { + setTimeout(() => { + client1.close(); + client2.close(); + done(); + }, 500); + }); +}); diff --git a/packages/grpc-js/test/test-idle-timer.ts b/packages/grpc-js/test/test-idle-timer.ts new file mode 100644 index 000000000..3f2a8ed20 --- /dev/null +++ b/packages/grpc-js/test/test-idle-timer.ts @@ -0,0 +1,257 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +import * as assert from 'assert'; +import * as grpc from '../src'; +import { TestClient, TestServer } from './common'; + +describe('Channel idle timer', () => { + let server: TestServer; + let client: TestClient | null = null; + before(() => { + server = new TestServer(false); + return server.start(); + }); + afterEach(() => { + if (client) { + client.close(); + client = null; + } + }); + after(() => { + server.shutdown(); + }); + it('Should go idle after the specified time after a request ends', function (done) { + this.timeout(5000); + client = TestClient.createFromServer(server, { + 'grpc.client_idle_timeout_ms': 1000, + }); + client.sendRequest(error => { + assert.ifError(error); + assert.strictEqual( + client!.getChannelState(), + grpc.connectivityState.READY + ); + setTimeout(() => { + assert.strictEqual( + client!.getChannelState(), + grpc.connectivityState.IDLE + ); + done(); + }, 1100); + }); + }); + it('Should be able to make a request after going idle', function (done) { + this.timeout(5000); + client = TestClient.createFromServer(server, { + 'grpc.client_idle_timeout_ms': 1000, + }); + client.sendRequest(error => { + assert.ifError(error); + assert.strictEqual( + client!.getChannelState(), + grpc.connectivityState.READY + ); + setTimeout(() => { + assert.strictEqual( + client!.getChannelState(), + grpc.connectivityState.IDLE + ); + client!.sendRequest(error => { + assert.ifError(error); + done(); + }); + }, 1100); + }); + }); + it('Should go idle after the specified time after waitForReady ends', function (done) { + this.timeout(5000); + client = TestClient.createFromServer(server, { + 'grpc.client_idle_timeout_ms': 1000, + }); + const deadline = new Date(); + deadline.setSeconds(deadline.getSeconds() + 3); + client.waitForReady(deadline, error => { + assert.ifError(error); + assert.strictEqual( + client!.getChannelState(), + grpc.connectivityState.READY + ); + setTimeout(() => { + assert.strictEqual( + client!.getChannelState(), + grpc.connectivityState.IDLE + ); + done(); + }, 1100); + }); + }); + it('Should ensure that the timeout is at least 1 second', function (done) { + client = TestClient.createFromServer(server, { + 'grpc.client_idle_timeout_ms': 50, + }); + client.sendRequest(error => { + assert.ifError(error); + assert.strictEqual( + client!.getChannelState(), + grpc.connectivityState.READY + ); + setTimeout(() => { + // Should still be ready after 100ms + assert.strictEqual( + client!.getChannelState(), + grpc.connectivityState.READY + ); + setTimeout(() => { + // Should go IDLE after another second + assert.strictEqual( + client!.getChannelState(), + grpc.connectivityState.IDLE + ); + done(); + }, 1000); + }, 100); + }); + }); +}); + +describe('Channel idle timer with UDS', () => { + let server: TestServer; + let client: TestClient | null = null; + before(() => { + server = new TestServer(false); + return server.startUds(); + }); + afterEach(() => { + if (client) { + client.close(); + client = null; + } + }); + after(() => { + server.shutdown(); + }); + it('Should be able to make a request after going idle', function (done) { + this.timeout(5000); + client = TestClient.createFromServer(server, { + 'grpc.client_idle_timeout_ms': 1000, + }); + client.sendRequest(error => { + assert.ifError(error); + assert.strictEqual( + client!.getChannelState(), + grpc.connectivityState.READY + ); + setTimeout(() => { + assert.strictEqual( + client!.getChannelState(), + grpc.connectivityState.IDLE + ); + client!.sendRequest(error => { + assert.ifError(error); + done(); + 
}); + }, 1100); + }); + }); +}); + +describe('Server idle timer', () => { + let server: TestServer; + let client: TestClient | null = null; + before(() => { + server = new TestServer(false, { + 'grpc.max_connection_idle_ms': 500, // small for testing purposes + }); + return server.start(); + }); + afterEach(() => { + if (client) { + client.close(); + client = null; + } + }); + after(() => { + server.shutdown(); + }); + + it('Should go idle after the specified time after a request ends', function (done) { + this.timeout(5000); + client = TestClient.createFromServer(server); + client.sendRequest(error => { + assert.ifError(error); + assert.strictEqual( + client!.getChannelState(), + grpc.connectivityState.READY + ); + client?.waitForClientState( + Date.now() + 600, + grpc.connectivityState.IDLE, + done + ); + }); + }); + + it('Should be able to make a request after going idle', function (done) { + this.timeout(5000); + client = TestClient.createFromServer(server); + client.sendRequest(error => { + assert.ifError(error); + assert.strictEqual( + client!.getChannelState(), + grpc.connectivityState.READY + ); + + client!.waitForClientState( + Date.now() + 600, + grpc.connectivityState.IDLE, + err => { + if (err) return done(err); + + assert.strictEqual( + client!.getChannelState(), + grpc.connectivityState.IDLE + ); + client!.sendRequest(error => { + assert.ifError(error); + done(); + }); + } + ); + }); + }); + + it('Should go idle after the specified time after waitForReady ends', function (done) { + this.timeout(5000); + client = TestClient.createFromServer(server); + const deadline = new Date(); + deadline.setSeconds(deadline.getSeconds() + 3); + client.waitForReady(deadline, error => { + assert.ifError(error); + assert.strictEqual( + client!.getChannelState(), + grpc.connectivityState.READY + ); + + client!.waitForClientState( + Date.now() + 600, + grpc.connectivityState.IDLE, + done + ); + }); + }); +}); diff --git a/packages/grpc-js/test/test-local-subchannel-pool.ts b/packages/grpc-js/test/test-local-subchannel-pool.ts new file mode 100644 index 000000000..00da9c64e --- /dev/null +++ b/packages/grpc-js/test/test-local-subchannel-pool.ts @@ -0,0 +1,79 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +import * as assert from 'assert'; +import * as path from 'path'; +import * as grpc from '../src'; +import { + sendUnaryData, + Server, + ServerCredentials, + ServerUnaryCall, + ServiceClientConstructor, + ServiceError, +} from '../src'; +import { loadProtoFile } from './common'; + +const protoFile = path.join(__dirname, 'fixtures', 'echo_service.proto'); +const echoService = loadProtoFile(protoFile) + .EchoService as ServiceClientConstructor; + +describe('Local subchannel pool', () => { + let server: Server; + let serverPort: number; + + before(done => { + server = new Server(); + server.addService(echoService.service, { + echo(call: ServerUnaryCall, callback: sendUnaryData) { + callback(null, call.request); + }, + }); + + server.bindAsync( + 'localhost:0', + ServerCredentials.createInsecure(), + (err, port) => { + assert.ifError(err); + serverPort = port; + server.start(); + done(); + } + ); + }); + + after(done => { + server.tryShutdown(done); + }); + + it('should complete the client lifecycle without error', done => { + const client = new echoService( + `localhost:${serverPort}`, + grpc.credentials.createInsecure(), + { 'grpc.use_local_subchannel_pool': 1 } + ); + client.echo( + { value: 'test value', value2: 3 }, + (error: ServiceError, response: any) => { + assert.ifError(error); + assert.deepStrictEqual(response, { value: 'test value', value2: 3 }); + client.close(); + done(); + } + ); + }); +}); diff --git a/packages/grpc-js/test/test-logging.ts b/packages/grpc-js/test/test-logging.ts index c1601cbc5..d275158cf 100644 --- a/packages/grpc-js/test/test-logging.ts +++ b/packages/grpc-js/test/test-logging.ts @@ -27,10 +27,6 @@ describe('Logging', () => { grpc.setLogVerbosity(grpc.logVerbosity.DEBUG); }); - it('logger defaults to console', () => { - assert.strictEqual(logging.getLogger(), console); - }); - it('sets the logger to a new value', () => { const logger: Partial = {}; diff --git a/packages/grpc-js/test/test-outlier-detection.ts b/packages/grpc-js/test/test-outlier-detection.ts new file mode 100644 index 000000000..78b972303 --- /dev/null +++ b/packages/grpc-js/test/test-outlier-detection.ts @@ -0,0 +1,592 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +import * as assert from 'assert'; +import * as path from 'path'; +import * as grpc from '../src'; +import { loadProtoFile } from './common'; +import { OutlierDetectionLoadBalancingConfig } from '../src/load-balancer-outlier-detection'; + +function multiDone(done: Mocha.Done, target: number) { + let count = 0; + return (error?: any) => { + if (error) { + done(error); + } + count++; + if (count >= target) { + done(); + } + }; +} + +const defaultOutlierDetectionServiceConfig = { + methodConfig: [], + loadBalancingConfig: [ + { + outlier_detection: { + success_rate_ejection: {}, + failure_percentage_ejection: {}, + child_policy: [{ round_robin: {} }], + }, + }, + ], +}; + +const defaultOutlierDetectionServiceConfigString = JSON.stringify( + defaultOutlierDetectionServiceConfig +); + +const successRateOutlierDetectionServiceConfig = { + methodConfig: [], + loadBalancingConfig: [ + { + outlier_detection: { + interval: { + seconds: 1, + nanos: 0, + }, + base_ejection_time: { + seconds: 3, + nanos: 0, + }, + success_rate_ejection: { + request_volume: 5, + }, + child_policy: [{ round_robin: {} }], + }, + }, + ], +}; + +const successRateOutlierDetectionServiceConfigString = JSON.stringify( + successRateOutlierDetectionServiceConfig +); + +const failurePercentageOutlierDetectionServiceConfig = { + methodConfig: [], + loadBalancingConfig: [ + { + outlier_detection: { + interval: { + seconds: 1, + nanos: 0, + }, + base_ejection_time: { + seconds: 3, + nanos: 0, + }, + failure_percentage_ejection: { + request_volume: 5, + }, + child_policy: [{ round_robin: {} }], + }, + }, + ], +}; + +const falurePercentageOutlierDetectionServiceConfigString = JSON.stringify( + failurePercentageOutlierDetectionServiceConfig +); + +const goodService = { + echo: ( + call: grpc.ServerUnaryCall, + callback: grpc.sendUnaryData + ) => { + callback(null, call.request); + }, +}; + +const badService = { + echo: ( + call: grpc.ServerUnaryCall, + callback: grpc.sendUnaryData + ) => { + callback({ + code: grpc.status.PERMISSION_DENIED, + details: 'Permission denied', + }); + }, +}; + +const protoFile = path.join(__dirname, 'fixtures', 'echo_service.proto'); +const EchoService = loadProtoFile(protoFile) + .EchoService as grpc.ServiceClientConstructor; + +describe('Outlier detection config validation', () => { + describe('interval', () => { + it('Should reject a negative interval', () => { + const loadBalancingConfig = { + interval: { + seconds: -1, + nanos: 0, + }, + child_policy: [{ round_robin: {} }], + }; + assert.throws(() => { + OutlierDetectionLoadBalancingConfig.createFromJson(loadBalancingConfig); + }, /interval parse error: values out of range for non-negative Duaration/); + }); + it('Should reject a large interval', () => { + const loadBalancingConfig = { + interval: { + seconds: 1e12, + nanos: 0, + }, + child_policy: [{ round_robin: {} }], + }; + assert.throws(() => { + OutlierDetectionLoadBalancingConfig.createFromJson(loadBalancingConfig); + }, /interval parse error: values out of range for non-negative Duaration/); + }); + it('Should reject a negative interval.nanos', () => { + const loadBalancingConfig = { + interval: { + seconds: 0, + nanos: -1, + }, + child_policy: [{ round_robin: {} }], + }; + assert.throws(() => { + OutlierDetectionLoadBalancingConfig.createFromJson(loadBalancingConfig); + }, /interval parse error: values out of range for non-negative Duaration/); + }); + it('Should reject a large interval.nanos', () => { + const loadBalancingConfig = { + interval: { + seconds: 0, + nanos: 1e12, + }, + 
child_policy: [{ round_robin: {} }], + }; + assert.throws(() => { + OutlierDetectionLoadBalancingConfig.createFromJson(loadBalancingConfig); + }, /interval parse error: values out of range for non-negative Duaration/); + }); + }); + describe('base_ejection_time', () => { + it('Should reject a negative base_ejection_time', () => { + const loadBalancingConfig = { + base_ejection_time: { + seconds: -1, + nanos: 0, + }, + child_policy: [{ round_robin: {} }], + }; + assert.throws(() => { + OutlierDetectionLoadBalancingConfig.createFromJson(loadBalancingConfig); + }, /base_ejection_time parse error: values out of range for non-negative Duaration/); + }); + it('Should reject a large base_ejection_time', () => { + const loadBalancingConfig = { + base_ejection_time: { + seconds: 1e12, + nanos: 0, + }, + child_policy: [{ round_robin: {} }], + }; + assert.throws(() => { + OutlierDetectionLoadBalancingConfig.createFromJson(loadBalancingConfig); + }, /base_ejection_time parse error: values out of range for non-negative Duaration/); + }); + it('Should reject a negative base_ejection_time.nanos', () => { + const loadBalancingConfig = { + base_ejection_time: { + seconds: 0, + nanos: -1, + }, + child_policy: [{ round_robin: {} }], + }; + assert.throws(() => { + OutlierDetectionLoadBalancingConfig.createFromJson(loadBalancingConfig); + }, /base_ejection_time parse error: values out of range for non-negative Duaration/); + }); + it('Should reject a large base_ejection_time.nanos', () => { + const loadBalancingConfig = { + base_ejection_time: { + seconds: 0, + nanos: 1e12, + }, + child_policy: [{ round_robin: {} }], + }; + assert.throws(() => { + OutlierDetectionLoadBalancingConfig.createFromJson(loadBalancingConfig); + }, /base_ejection_time parse error: values out of range for non-negative Duaration/); + }); + }); + describe('max_ejection_time', () => { + it('Should reject a negative max_ejection_time', () => { + const loadBalancingConfig = { + max_ejection_time: { + seconds: -1, + nanos: 0, + }, + child_policy: [{ round_robin: {} }], + }; + assert.throws(() => { + OutlierDetectionLoadBalancingConfig.createFromJson(loadBalancingConfig); + }, /max_ejection_time parse error: values out of range for non-negative Duaration/); + }); + it('Should reject a large max_ejection_time', () => { + const loadBalancingConfig = { + max_ejection_time: { + seconds: 1e12, + nanos: 0, + }, + child_policy: [{ round_robin: {} }], + }; + assert.throws(() => { + OutlierDetectionLoadBalancingConfig.createFromJson(loadBalancingConfig); + }, /max_ejection_time parse error: values out of range for non-negative Duaration/); + }); + it('Should reject a negative max_ejection_time.nanos', () => { + const loadBalancingConfig = { + max_ejection_time: { + seconds: 0, + nanos: -1, + }, + child_policy: [{ round_robin: {} }], + }; + assert.throws(() => { + OutlierDetectionLoadBalancingConfig.createFromJson(loadBalancingConfig); + }, /max_ejection_time parse error: values out of range for non-negative Duaration/); + }); + it('Should reject a large max_ejection_time.nanos', () => { + const loadBalancingConfig = { + max_ejection_time: { + seconds: 0, + nanos: 1e12, + }, + child_policy: [{ round_robin: {} }], + }; + assert.throws(() => { + OutlierDetectionLoadBalancingConfig.createFromJson(loadBalancingConfig); + }, /max_ejection_time parse error: values out of range for non-negative Duaration/); + }); + }); + describe('max_ejection_percent', () => { + it('Should reject a value above 100', () => { + const loadBalancingConfig = { + 
max_ejection_percent: 101, + child_policy: [{ round_robin: {} }], + }; + assert.throws(() => { + OutlierDetectionLoadBalancingConfig.createFromJson(loadBalancingConfig); + }, /max_ejection_percent parse error: value out of range for percentage/); + }); + it('Should reject a negative value', () => { + const loadBalancingConfig = { + max_ejection_percent: -1, + child_policy: [{ round_robin: {} }], + }; + assert.throws(() => { + OutlierDetectionLoadBalancingConfig.createFromJson(loadBalancingConfig); + }, /max_ejection_percent parse error: value out of range for percentage/); + }); + }); + describe('success_rate_ejection.enforcement_percentage', () => { + it('Should reject a value above 100', () => { + const loadBalancingConfig = { + success_rate_ejection: { + enforcement_percentage: 101, + }, + child_policy: [{ round_robin: {} }], + }; + assert.throws(() => { + OutlierDetectionLoadBalancingConfig.createFromJson(loadBalancingConfig); + }, /success_rate_ejection\.enforcement_percentage parse error: value out of range for percentage/); + }); + it('Should reject a negative value', () => { + const loadBalancingConfig = { + success_rate_ejection: { + enforcement_percentage: -1, + }, + child_policy: [{ round_robin: {} }], + }; + assert.throws(() => { + OutlierDetectionLoadBalancingConfig.createFromJson(loadBalancingConfig); + }, /success_rate_ejection\.enforcement_percentage parse error: value out of range for percentage/); + }); + }); + describe('failure_percentage_ejection.threshold', () => { + it('Should reject a value above 100', () => { + const loadBalancingConfig = { + failure_percentage_ejection: { + threshold: 101, + }, + child_policy: [{ round_robin: {} }], + }; + assert.throws(() => { + OutlierDetectionLoadBalancingConfig.createFromJson(loadBalancingConfig); + }, /failure_percentage_ejection\.threshold parse error: value out of range for percentage/); + }); + it('Should reject a negative value', () => { + const loadBalancingConfig = { + failure_percentage_ejection: { + threshold: -1, + }, + child_policy: [{ round_robin: {} }], + }; + assert.throws(() => { + OutlierDetectionLoadBalancingConfig.createFromJson(loadBalancingConfig); + }, /failure_percentage_ejection\.threshold parse error: value out of range for percentage/); + }); + }); + describe('failure_percentage_ejection.enforcement_percentage', () => { + it('Should reject a value above 100', () => { + const loadBalancingConfig = { + failure_percentage_ejection: { + enforcement_percentage: 101, + }, + child_policy: [{ round_robin: {} }], + }; + assert.throws(() => { + OutlierDetectionLoadBalancingConfig.createFromJson(loadBalancingConfig); + }, /failure_percentage_ejection\.enforcement_percentage parse error: value out of range for percentage/); + }); + it('Should reject a negative value', () => { + const loadBalancingConfig = { + failure_percentage_ejection: { + enforcement_percentage: -1, + }, + child_policy: [{ round_robin: {} }], + }; + assert.throws(() => { + OutlierDetectionLoadBalancingConfig.createFromJson(loadBalancingConfig); + }, /failure_percentage_ejection\.enforcement_percentage parse error: value out of range for percentage/); + }); + }); + describe('child_policy', () => { + it('Should reject a pick_first child_policy', () => { + const loadBalancingConfig = { + child_policy: [{ pick_first: {} }], + }; + assert.throws(() => { + OutlierDetectionLoadBalancingConfig.createFromJson(loadBalancingConfig); + }, /outlier_detection LB policy cannot have a pick_first child policy/); + }); + }); +}); + +describe('Outlier detection', 
() => { + const GOOD_PORTS = 4; + let goodServer: grpc.Server; + let badServer: grpc.Server; + const goodPorts: number[] = []; + let badPort: number; + before(done => { + const eachDone = multiDone(() => { + goodServer.start(); + badServer.start(); + done(); + }, GOOD_PORTS + 1); + goodServer = new grpc.Server(); + goodServer.addService(EchoService.service, goodService); + for (let i = 0; i < GOOD_PORTS; i++) { + goodServer.bindAsync( + 'localhost:0', + grpc.ServerCredentials.createInsecure(), + (error, port) => { + if (error) { + eachDone(error); + return; + } + goodPorts.push(port); + eachDone(); + } + ); + } + badServer = new grpc.Server(); + badServer.addService(EchoService.service, badService); + badServer.bindAsync( + 'localhost:0', + grpc.ServerCredentials.createInsecure(), + (error, port) => { + if (error) { + eachDone(error); + return; + } + badPort = port; + eachDone(); + } + ); + }); + after(() => { + goodServer.forceShutdown(); + badServer.forceShutdown(); + }); + + function makeManyRequests( + makeOneRequest: (callback: (error?: Error) => void) => void, + total: number, + callback: (error?: Error) => void + ) { + if (total === 0) { + callback(); + return; + } + makeOneRequest(error => { + if (error) { + callback(error); + return; + } + makeManyRequests(makeOneRequest, total - 1, callback); + }); + } + + it('Should allow normal operation with one server', done => { + const client = new EchoService( + `localhost:${goodPorts[0]}`, + grpc.credentials.createInsecure(), + { 'grpc.service_config': defaultOutlierDetectionServiceConfigString } + ); + client.echo( + { value: 'test value', value2: 3 }, + (error: grpc.ServiceError, response: any) => { + assert.ifError(error); + assert.deepStrictEqual(response, { value: 'test value', value2: 3 }); + done(); + } + ); + }); + describe('Success rate', () => { + let makeCheckedRequest: (callback: () => void) => void; + let makeUncheckedRequest: (callback: (error?: Error) => void) => void; + before(() => { + const target = + 'ipv4:///' + + goodPorts.map(port => `127.0.0.1:${port}`).join(',') + + `,127.0.0.1:${badPort}`; + const client = new EchoService( + target, + grpc.credentials.createInsecure(), + { + 'grpc.service_config': successRateOutlierDetectionServiceConfigString, + } + ); + makeUncheckedRequest = (callback: () => void) => { + client.echo( + { value: 'test value', value2: 3 }, + (error: grpc.ServiceError, response: any) => { + callback(); + } + ); + }; + makeCheckedRequest = (callback: (error?: Error) => void) => { + client.echo( + { value: 'test value', value2: 3 }, + (error: grpc.ServiceError, response: any) => { + callback(error); + } + ); + }; + }); + it('Should eject a server if it is failing requests', done => { + // Make a large volume of requests + makeManyRequests(makeUncheckedRequest, 50, () => { + // Give outlier detection time to run ejection checks + setTimeout(() => { + // Make enough requests to go around all servers + makeManyRequests(makeCheckedRequest, 10, done); + }, 1000); + }); + }); + it('Should uneject a server after the ejection period', function (done) { + this.timeout(5000); + makeManyRequests(makeUncheckedRequest, 50, () => { + setTimeout(() => { + makeManyRequests(makeCheckedRequest, 10, error => { + if (error) { + done(error); + return; + } + setTimeout(() => { + makeManyRequests(makeCheckedRequest, 10, error => { + assert(error); + done(); + }); + }, 3000); + }); + }, 1000); + }); + }); + }); + describe('Failure percentage', () => { + let makeCheckedRequest: (callback: () => void) => void; + let 
makeUncheckedRequest: (callback: (error?: Error) => void) => void; + before(() => { + const target = + 'ipv4:///' + + goodPorts.map(port => `127.0.0.1:${port}`).join(',') + + `,127.0.0.1:${badPort}`; + const client = new EchoService( + target, + grpc.credentials.createInsecure(), + { + 'grpc.service_config': + falurePercentageOutlierDetectionServiceConfigString, + } + ); + makeUncheckedRequest = (callback: () => void) => { + client.echo( + { value: 'test value', value2: 3 }, + (error: grpc.ServiceError, response: any) => { + callback(); + } + ); + }; + makeCheckedRequest = (callback: (error?: Error) => void) => { + client.echo( + { value: 'test value', value2: 3 }, + (error: grpc.ServiceError, response: any) => { + callback(error); + } + ); + }; + }); + it('Should eject a server if it is failing requests', done => { + // Make a large volume of requests + makeManyRequests(makeUncheckedRequest, 50, () => { + // Give outlier detection time to run ejection checks + setTimeout(() => { + // Make enough requests to go around all servers + makeManyRequests(makeCheckedRequest, 10, done); + }, 1000); + }); + }); + it('Should uneject a server after the ejection period', function (done) { + this.timeout(5000); + makeManyRequests(makeUncheckedRequest, 50, () => { + setTimeout(() => { + makeManyRequests(makeCheckedRequest, 10, error => { + if (error) { + done(error); + return; + } + setTimeout(() => { + makeManyRequests(makeCheckedRequest, 10, error => { + assert(error); + done(); + }); + }, 3000); + }); + }, 1000); + }); + }); + }); +}); diff --git a/packages/grpc-js/test/test-pick-first.ts b/packages/grpc-js/test/test-pick-first.ts new file mode 100644 index 000000000..9803a5853 --- /dev/null +++ b/packages/grpc-js/test/test-pick-first.ts @@ -0,0 +1,829 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +import * as assert from 'assert'; + +import { ConnectivityState } from '../src/connectivity-state'; +import { + ChannelControlHelper, + createChildChannelControlHelper, +} from '../src/load-balancer'; +import { + PickFirstLoadBalancer, + PickFirstLoadBalancingConfig, + shuffled, +} from '../src/load-balancer-pick-first'; +import { Metadata } from '../src/metadata'; +import { Picker } from '../src/picker'; +import { Endpoint, subchannelAddressToString } from '../src/subchannel-address'; +import { MockSubchannel, TestClient, TestServer } from './common'; + +function updateStateCallBackForExpectedStateSequence( + expectedStateSequence: ConnectivityState[], + done: Mocha.Done +) { + const actualStateSequence: ConnectivityState[] = []; + let lastPicker: Picker | null = null; + let finished = false; + return (connectivityState: ConnectivityState, picker: Picker) => { + if (finished) { + return; + } + // Ignore duplicate state transitions + if ( + connectivityState === actualStateSequence[actualStateSequence.length - 1] + ) { + // Ignore READY duplicate state transitions if the picked subchannel is the same + if ( + connectivityState !== ConnectivityState.READY || + lastPicker?.pick({ extraPickInfo: {}, metadata: new Metadata() }) + ?.subchannel === + picker.pick({ extraPickInfo: {}, metadata: new Metadata() }) + .subchannel + ) { + return; + } + } + if ( + expectedStateSequence[actualStateSequence.length] !== connectivityState + ) { + finished = true; + done( + new Error( + `Unexpected state ${ + ConnectivityState[connectivityState] + } after [${actualStateSequence.map( + value => ConnectivityState[value] + )}]` + ) + ); + return; + } + actualStateSequence.push(connectivityState); + lastPicker = picker; + if (actualStateSequence.length === expectedStateSequence.length) { + finished = true; + done(); + } + }; +} + +describe('Shuffler', () => { + it('Should maintain the multiset of elements from the original array', () => { + const originalArray = [1, 2, 2, 3, 3, 3, 4, 4, 5]; + for (let i = 0; i < 100; i++) { + assert.deepStrictEqual( + shuffled(originalArray).sort((a, b) => a - b), + originalArray + ); + } + }); +}); + +describe('pick_first load balancing policy', () => { + const config = new PickFirstLoadBalancingConfig(false); + let subchannels: MockSubchannel[] = []; + const baseChannelControlHelper: ChannelControlHelper = { + createSubchannel: (subchannelAddress, subchannelArgs) => { + const subchannel = new MockSubchannel( + subchannelAddressToString(subchannelAddress) + ); + subchannels.push(subchannel); + return subchannel; + }, + addChannelzChild: () => {}, + removeChannelzChild: () => {}, + requestReresolution: () => {}, + updateState: () => {}, + }; + beforeEach(() => { + subchannels = []; + }); + it('Should report READY when a subchannel connects', done => { + const channelControlHelper = createChildChannelControlHelper( + baseChannelControlHelper, + { + updateState: updateStateCallBackForExpectedStateSequence( + [ConnectivityState.CONNECTING, ConnectivityState.READY], + done + ), + } + ); + const pickFirst = new PickFirstLoadBalancer(channelControlHelper, {}); + pickFirst.updateAddressList( + [{ addresses: [{ host: 'localhost', port: 1 }] }], + config + ); + process.nextTick(() => { + subchannels[0].transitionToState(ConnectivityState.READY); + }); + }); + it('Should report READY when a subchannel other than the first connects', done => { + const channelControlHelper = createChildChannelControlHelper( + baseChannelControlHelper, + { + updateState: 
updateStateCallBackForExpectedStateSequence( + [ConnectivityState.CONNECTING, ConnectivityState.READY], + done + ), + } + ); + const pickFirst = new PickFirstLoadBalancer(channelControlHelper, {}); + pickFirst.updateAddressList( + [ + { addresses: [{ host: 'localhost', port: 1 }] }, + { addresses: [{ host: 'localhost', port: 2 }] }, + ], + config + ); + process.nextTick(() => { + subchannels[1].transitionToState(ConnectivityState.READY); + }); + }); + it('Should report READY when a subchannel other than the first in the same endpoint connects', done => { + const channelControlHelper = createChildChannelControlHelper( + baseChannelControlHelper, + { + updateState: updateStateCallBackForExpectedStateSequence( + [ConnectivityState.CONNECTING, ConnectivityState.READY], + done + ), + } + ); + const pickFirst = new PickFirstLoadBalancer(channelControlHelper, {}); + pickFirst.updateAddressList( + [ + { + addresses: [ + { host: 'localhost', port: 1 }, + { host: 'localhost', port: 2 }, + ], + }, + ], + config + ); + process.nextTick(() => { + subchannels[1].transitionToState(ConnectivityState.READY); + }); + }); + it('Should report READY when updated with a subchannel that is already READY', done => { + const channelControlHelper = createChildChannelControlHelper( + baseChannelControlHelper, + { + createSubchannel: (subchannelAddress, subchannelArgs) => { + const subchannel = new MockSubchannel( + subchannelAddressToString(subchannelAddress), + ConnectivityState.READY + ); + subchannels.push(subchannel); + return subchannel; + }, + updateState: updateStateCallBackForExpectedStateSequence( + [ConnectivityState.READY], + done + ), + } + ); + const pickFirst = new PickFirstLoadBalancer(channelControlHelper, {}); + pickFirst.updateAddressList( + [{ addresses: [{ host: 'localhost', port: 1 }] }], + config + ); + }); + it('Should stay CONNECTING if only some subchannels fail to connect', done => { + const channelControlHelper = createChildChannelControlHelper( + baseChannelControlHelper, + { + updateState: updateStateCallBackForExpectedStateSequence( + [ConnectivityState.CONNECTING], + done + ), + } + ); + const pickFirst = new PickFirstLoadBalancer(channelControlHelper, {}); + pickFirst.updateAddressList( + [ + { addresses: [{ host: 'localhost', port: 1 }] }, + { addresses: [{ host: 'localhost', port: 2 }] }, + ], + config + ); + process.nextTick(() => { + subchannels[0].transitionToState(ConnectivityState.TRANSIENT_FAILURE); + }); + }); + it('Should enter TRANSIENT_FAILURE when subchannels fail to connect', done => { + const channelControlHelper = createChildChannelControlHelper( + baseChannelControlHelper, + { + updateState: updateStateCallBackForExpectedStateSequence( + [ConnectivityState.CONNECTING, ConnectivityState.TRANSIENT_FAILURE], + done + ), + } + ); + const pickFirst = new PickFirstLoadBalancer(channelControlHelper, {}); + pickFirst.updateAddressList( + [ + { addresses: [{ host: 'localhost', port: 1 }] }, + { addresses: [{ host: 'localhost', port: 2 }] }, + ], + config + ); + process.nextTick(() => { + subchannels[0].transitionToState(ConnectivityState.TRANSIENT_FAILURE); + }); + process.nextTick(() => { + subchannels[1].transitionToState(ConnectivityState.TRANSIENT_FAILURE); + }); + }); + it('Should stay in TRANSIENT_FAILURE if subchannels go back to CONNECTING', done => { + const channelControlHelper = createChildChannelControlHelper( + baseChannelControlHelper, + { + updateState: updateStateCallBackForExpectedStateSequence( + [ConnectivityState.CONNECTING, 
ConnectivityState.TRANSIENT_FAILURE], + done + ), + } + ); + const pickFirst = new PickFirstLoadBalancer(channelControlHelper, {}); + pickFirst.updateAddressList( + [ + { addresses: [{ host: 'localhost', port: 1 }] }, + { addresses: [{ host: 'localhost', port: 2 }] }, + ], + config + ); + process.nextTick(() => { + subchannels[0].transitionToState(ConnectivityState.TRANSIENT_FAILURE); + process.nextTick(() => { + subchannels[1].transitionToState(ConnectivityState.TRANSIENT_FAILURE); + process.nextTick(() => { + subchannels[0].transitionToState(ConnectivityState.CONNECTING); + process.nextTick(() => { + subchannels[1].transitionToState(ConnectivityState.CONNECTING); + }); + }); + }); + }); + }); + it('Should immediately enter TRANSIENT_FAILURE if subchannels start in TRANSIENT_FAILURE', done => { + const channelControlHelper = createChildChannelControlHelper( + baseChannelControlHelper, + { + createSubchannel: (subchannelAddress, subchannelArgs) => { + const subchannel = new MockSubchannel( + subchannelAddressToString(subchannelAddress), + ConnectivityState.TRANSIENT_FAILURE + ); + subchannels.push(subchannel); + return subchannel; + }, + updateState: updateStateCallBackForExpectedStateSequence( + [ConnectivityState.TRANSIENT_FAILURE], + done + ), + } + ); + const pickFirst = new PickFirstLoadBalancer(channelControlHelper, {}); + pickFirst.updateAddressList( + [ + { addresses: [{ host: 'localhost', port: 1 }] }, + { addresses: [{ host: 'localhost', port: 2 }] }, + ], + config + ); + }); + it('Should enter READY if a subchannel connects after entering TRANSIENT_FAILURE mode', done => { + const channelControlHelper = createChildChannelControlHelper( + baseChannelControlHelper, + { + createSubchannel: (subchannelAddress, subchannelArgs) => { + const subchannel = new MockSubchannel( + subchannelAddressToString(subchannelAddress), + ConnectivityState.TRANSIENT_FAILURE + ); + subchannels.push(subchannel); + return subchannel; + }, + updateState: updateStateCallBackForExpectedStateSequence( + [ConnectivityState.TRANSIENT_FAILURE, ConnectivityState.READY], + done + ), + } + ); + const pickFirst = new PickFirstLoadBalancer(channelControlHelper, {}); + pickFirst.updateAddressList( + [ + { addresses: [{ host: 'localhost', port: 1 }] }, + { addresses: [{ host: 'localhost', port: 2 }] }, + ], + config + ); + process.nextTick(() => { + subchannels[0].transitionToState(ConnectivityState.READY); + }); + }); + it('Should stay in TRANSIENT_FAILURE after an address update with non-READY subchannels', done => { + let currentStartState = ConnectivityState.TRANSIENT_FAILURE; + const channelControlHelper = createChildChannelControlHelper( + baseChannelControlHelper, + { + createSubchannel: (subchannelAddress, subchannelArgs) => { + const subchannel = new MockSubchannel( + subchannelAddressToString(subchannelAddress), + currentStartState + ); + subchannels.push(subchannel); + return subchannel; + }, + updateState: updateStateCallBackForExpectedStateSequence( + [ConnectivityState.TRANSIENT_FAILURE], + done + ), + } + ); + const pickFirst = new PickFirstLoadBalancer(channelControlHelper, {}); + pickFirst.updateAddressList( + [ + { addresses: [{ host: 'localhost', port: 1 }] }, + { addresses: [{ host: 'localhost', port: 2 }] }, + ], + config + ); + process.nextTick(() => { + currentStartState = ConnectivityState.CONNECTING; + pickFirst.updateAddressList( + [ + { addresses: [{ host: 'localhost', port: 1 }] }, + { addresses: [{ host: 'localhost', port: 2 }] }, + ], + config + ); + }); + }); + it('Should transition from 
TRANSIENT_FAILURE to READY after an address update with a READY subchannel', done => { + let currentStartState = ConnectivityState.TRANSIENT_FAILURE; + const channelControlHelper = createChildChannelControlHelper( + baseChannelControlHelper, + { + createSubchannel: (subchannelAddress, subchannelArgs) => { + const subchannel = new MockSubchannel( + subchannelAddressToString(subchannelAddress), + currentStartState + ); + subchannels.push(subchannel); + return subchannel; + }, + updateState: updateStateCallBackForExpectedStateSequence( + [ConnectivityState.TRANSIENT_FAILURE, ConnectivityState.READY], + done + ), + } + ); + const pickFirst = new PickFirstLoadBalancer(channelControlHelper, {}); + pickFirst.updateAddressList( + [ + { addresses: [{ host: 'localhost', port: 1 }] }, + { addresses: [{ host: 'localhost', port: 2 }] }, + ], + config + ); + process.nextTick(() => { + currentStartState = ConnectivityState.READY; + pickFirst.updateAddressList( + [{ addresses: [{ host: 'localhost', port: 3 }] }], + config + ); + }); + }); + it('Should transition from READY to IDLE if the connected subchannel disconnects', done => { + const currentStartState = ConnectivityState.READY; + const channelControlHelper = createChildChannelControlHelper( + baseChannelControlHelper, + { + createSubchannel: (subchannelAddress, subchannelArgs) => { + const subchannel = new MockSubchannel( + subchannelAddressToString(subchannelAddress), + currentStartState + ); + subchannels.push(subchannel); + return subchannel; + }, + updateState: updateStateCallBackForExpectedStateSequence( + [ConnectivityState.READY, ConnectivityState.IDLE], + done + ), + } + ); + const pickFirst = new PickFirstLoadBalancer(channelControlHelper, {}); + pickFirst.updateAddressList( + [{ addresses: [{ host: 'localhost', port: 1 }] }], + config + ); + process.nextTick(() => { + subchannels[0].transitionToState(ConnectivityState.IDLE); + }); + }); + it('Should transition from READY to CONNECTING if the connected subchannel disconnects after an update', done => { + let currentStartState = ConnectivityState.READY; + const channelControlHelper = createChildChannelControlHelper( + baseChannelControlHelper, + { + createSubchannel: (subchannelAddress, subchannelArgs) => { + const subchannel = new MockSubchannel( + subchannelAddressToString(subchannelAddress), + currentStartState + ); + subchannels.push(subchannel); + return subchannel; + }, + updateState: updateStateCallBackForExpectedStateSequence( + [ConnectivityState.READY, ConnectivityState.CONNECTING], + done + ), + } + ); + const pickFirst = new PickFirstLoadBalancer(channelControlHelper, {}); + pickFirst.updateAddressList( + [{ addresses: [{ host: 'localhost', port: 1 }] }], + config + ); + process.nextTick(() => { + currentStartState = ConnectivityState.IDLE; + pickFirst.updateAddressList( + [{ addresses: [{ host: 'localhost', port: 2 }] }], + config + ); + process.nextTick(() => { + subchannels[0].transitionToState(ConnectivityState.IDLE); + }); + }); + }); + it('Should transition from READY to TRANSIENT_FAILURE if the connected subchannel disconnects and the update fails', done => { + let currentStartState = ConnectivityState.READY; + const channelControlHelper = createChildChannelControlHelper( + baseChannelControlHelper, + { + createSubchannel: (subchannelAddress, subchannelArgs) => { + const subchannel = new MockSubchannel( + subchannelAddressToString(subchannelAddress), + currentStartState + ); + subchannels.push(subchannel); + return subchannel; + }, + updateState: 
updateStateCallBackForExpectedStateSequence( + [ConnectivityState.READY, ConnectivityState.TRANSIENT_FAILURE], + done + ), + } + ); + const pickFirst = new PickFirstLoadBalancer(channelControlHelper, {}); + pickFirst.updateAddressList( + [{ addresses: [{ host: 'localhost', port: 1 }] }], + config + ); + process.nextTick(() => { + currentStartState = ConnectivityState.TRANSIENT_FAILURE; + pickFirst.updateAddressList( + [{ addresses: [{ host: 'localhost', port: 2 }] }], + config + ); + process.nextTick(() => { + subchannels[0].transitionToState(ConnectivityState.IDLE); + }); + }); + }); + it('Should transition from READY to READY if a subchannel is connected and an update has a connected subchannel', done => { + const currentStartState = ConnectivityState.READY; + const channelControlHelper = createChildChannelControlHelper( + baseChannelControlHelper, + { + createSubchannel: (subchannelAddress, subchannelArgs) => { + const subchannel = new MockSubchannel( + subchannelAddressToString(subchannelAddress), + currentStartState + ); + subchannels.push(subchannel); + return subchannel; + }, + updateState: updateStateCallBackForExpectedStateSequence( + [ConnectivityState.READY, ConnectivityState.READY], + done + ), + } + ); + const pickFirst = new PickFirstLoadBalancer(channelControlHelper, {}); + pickFirst.updateAddressList( + [{ addresses: [{ host: 'localhost', port: 1 }] }], + config + ); + process.nextTick(() => { + pickFirst.updateAddressList( + [{ addresses: [{ host: 'localhost', port: 2 }] }], + config + ); + process.nextTick(() => { + subchannels[0].transitionToState(ConnectivityState.IDLE); + }); + }); + }); + it('Should request reresolution every time each child reports TF', done => { + let reresolutionRequestCount = 0; + const targetReresolutionRequestCount = 3; + const currentStartState = ConnectivityState.IDLE; + const channelControlHelper = createChildChannelControlHelper( + baseChannelControlHelper, + { + createSubchannel: (subchannelAddress, subchannelArgs) => { + const subchannel = new MockSubchannel( + subchannelAddressToString(subchannelAddress), + currentStartState + ); + subchannels.push(subchannel); + return subchannel; + }, + updateState: updateStateCallBackForExpectedStateSequence( + [ConnectivityState.CONNECTING, ConnectivityState.TRANSIENT_FAILURE], + err => + setImmediate(() => { + assert.strictEqual( + reresolutionRequestCount, + targetReresolutionRequestCount + ); + done(err); + }) + ), + requestReresolution: () => { + reresolutionRequestCount += 1; + }, + } + ); + const pickFirst = new PickFirstLoadBalancer(channelControlHelper, {}); + pickFirst.updateAddressList( + [{ addresses: [{ host: 'localhost', port: 1 }] }], + config + ); + process.nextTick(() => { + subchannels[0].transitionToState(ConnectivityState.TRANSIENT_FAILURE); + process.nextTick(() => { + pickFirst.updateAddressList( + [{ addresses: [{ host: 'localhost', port: 2 }] }], + config + ); + process.nextTick(() => { + subchannels[1].transitionToState(ConnectivityState.TRANSIENT_FAILURE); + process.nextTick(() => { + pickFirst.updateAddressList( + [{ addresses: [{ host: 'localhost', port: 3 }] }], + config + ); + process.nextTick(() => { + subchannels[2].transitionToState( + ConnectivityState.TRANSIENT_FAILURE + ); + }); + }); + }); + }); + }); + }); + it('Should request reresolution if the new subchannels are already in TF', done => { + let reresolutionRequestCount = 0; + const targetReresolutionRequestCount = 3; + const currentStartState = ConnectivityState.TRANSIENT_FAILURE; + const channelControlHelper = 
createChildChannelControlHelper( + baseChannelControlHelper, + { + createSubchannel: (subchannelAddress, subchannelArgs) => { + const subchannel = new MockSubchannel( + subchannelAddressToString(subchannelAddress), + currentStartState + ); + subchannels.push(subchannel); + return subchannel; + }, + updateState: updateStateCallBackForExpectedStateSequence( + [ConnectivityState.TRANSIENT_FAILURE], + err => + setImmediate(() => { + assert.strictEqual( + reresolutionRequestCount, + targetReresolutionRequestCount + ); + done(err); + }) + ), + requestReresolution: () => { + reresolutionRequestCount += 1; + }, + } + ); + const pickFirst = new PickFirstLoadBalancer(channelControlHelper, {}); + pickFirst.updateAddressList( + [{ addresses: [{ host: 'localhost', port: 1 }] }], + config + ); + process.nextTick(() => { + pickFirst.updateAddressList( + [{ addresses: [{ host: 'localhost', port: 2 }] }], + config + ); + process.nextTick(() => { + pickFirst.updateAddressList( + [{ addresses: [{ host: 'localhost', port: 2 }] }], + config + ); + }); + }); + }); + it('Should reconnect to the same address list if exitIdle is called', done => { + const currentStartState = ConnectivityState.READY; + const channelControlHelper = createChildChannelControlHelper( + baseChannelControlHelper, + { + createSubchannel: (subchannelAddress, subchannelArgs) => { + const subchannel = new MockSubchannel( + subchannelAddressToString(subchannelAddress), + currentStartState + ); + subchannels.push(subchannel); + return subchannel; + }, + updateState: updateStateCallBackForExpectedStateSequence( + [ + ConnectivityState.READY, + ConnectivityState.IDLE, + ConnectivityState.READY, + ], + done + ), + } + ); + const pickFirst = new PickFirstLoadBalancer(channelControlHelper, {}); + pickFirst.updateAddressList( + [{ addresses: [{ host: 'localhost', port: 1 }] }], + config + ); + process.nextTick(() => { + subchannels[0].transitionToState(ConnectivityState.IDLE); + process.nextTick(() => { + pickFirst.exitIdle(); + }); + }); + }); + describe('Address list randomization', () => { + const shuffleConfig = new PickFirstLoadBalancingConfig(true); + it('Should pick different subchannels after multiple updates', done => { + const pickedSubchannels: Set = new Set(); + const channelControlHelper = createChildChannelControlHelper( + baseChannelControlHelper, + { + createSubchannel: (subchannelAddress, subchannelArgs) => { + const subchannel = new MockSubchannel( + subchannelAddressToString(subchannelAddress), + ConnectivityState.READY + ); + subchannels.push(subchannel); + return subchannel; + }, + updateState: (connectivityState, picker) => { + if (connectivityState === ConnectivityState.READY) { + const pickedSubchannel = picker.pick({ + extraPickInfo: {}, + metadata: new Metadata(), + }).subchannel; + if (pickedSubchannel) { + pickedSubchannels.add(pickedSubchannel.getAddress()); + } + } + }, + } + ); + const endpoints: Endpoint[] = []; + for (let i = 0; i < 10; i++) { + endpoints.push({ addresses: [{ host: 'localhost', port: i + 1 }] }); + } + const pickFirst = new PickFirstLoadBalancer(channelControlHelper, {}); + /* Pick from 10 subchannels 5 times, with address randomization enabled, + * and verify that at least two different subchannels are picked. 
The + * probability choosing the same address every time is 1/10,000, which + * I am considering an acceptable flake rate */ + pickFirst.updateAddressList(endpoints, shuffleConfig); + process.nextTick(() => { + pickFirst.updateAddressList(endpoints, shuffleConfig); + process.nextTick(() => { + pickFirst.updateAddressList(endpoints, shuffleConfig); + process.nextTick(() => { + pickFirst.updateAddressList(endpoints, shuffleConfig); + process.nextTick(() => { + pickFirst.updateAddressList(endpoints, shuffleConfig); + process.nextTick(() => { + assert(pickedSubchannels.size > 1); + done(); + }); + }); + }); + }); + }); + }); + it('Should pick the same subchannel if address randomization is disabled', done => { + /* This is the same test as the previous one, except using the config + * that does not enable address randomization. In this case, false + * positive probability is 1/10,000. */ + const pickedSubchannels: Set = new Set(); + const channelControlHelper = createChildChannelControlHelper( + baseChannelControlHelper, + { + createSubchannel: (subchannelAddress, subchannelArgs) => { + const subchannel = new MockSubchannel( + subchannelAddressToString(subchannelAddress), + ConnectivityState.READY + ); + subchannels.push(subchannel); + return subchannel; + }, + updateState: (connectivityState, picker) => { + if (connectivityState === ConnectivityState.READY) { + const pickedSubchannel = picker.pick({ + extraPickInfo: {}, + metadata: new Metadata(), + }).subchannel; + if (pickedSubchannel) { + pickedSubchannels.add(pickedSubchannel.getAddress()); + } + } + }, + } + ); + const endpoints: Endpoint[] = []; + for (let i = 0; i < 10; i++) { + endpoints.push({ addresses: [{ host: 'localhost', port: i + 1 }] }); + } + const pickFirst = new PickFirstLoadBalancer(channelControlHelper, {}); + pickFirst.updateAddressList(endpoints, config); + process.nextTick(() => { + pickFirst.updateAddressList(endpoints, config); + process.nextTick(() => { + pickFirst.updateAddressList(endpoints, config); + process.nextTick(() => { + pickFirst.updateAddressList(endpoints, config); + process.nextTick(() => { + pickFirst.updateAddressList(endpoints, config); + process.nextTick(() => { + assert(pickedSubchannels.size === 1); + done(); + }); + }); + }); + }); + }); + }); + describe('End-to-end functionality', () => { + const serviceConfig = { + methodConfig: [], + loadBalancingConfig: [ + { + pick_first: { + shuffleAddressList: true, + }, + }, + ], + }; + let server: TestServer; + let client: TestClient; + before(async () => { + server = new TestServer(false); + await server.start(); + client = TestClient.createFromServer(server, { + 'grpc.service_config': JSON.stringify(serviceConfig), + }); + }); + after(() => { + client.close(); + server.shutdown(); + }); + it('Should still work with shuffleAddressList set', done => { + client.sendRequest(error => { + done(error); + }); + }); + }); + }); +}); diff --git a/packages/grpc-js/test/test-prototype-pollution.ts b/packages/grpc-js/test/test-prototype-pollution.ts index 6dc4b293c..0d4bdd68c 100644 --- a/packages/grpc-js/test/test-prototype-pollution.ts +++ b/packages/grpc-js/test/test-prototype-pollution.ts @@ -21,11 +21,11 @@ import { loadPackageDefinition } from '../src'; describe('loadPackageDefinition', () => { it('Should not allow prototype pollution', () => { - loadPackageDefinition({'__proto__.polluted': true} as any); - assert.notStrictEqual(({} as any).polluted, true); + loadPackageDefinition({ '__proto__.polluted': true } as any); + assert.notStrictEqual(({} as 
any).polluted, true); }); it('Should not allow prototype pollution #2', () => { - loadPackageDefinition({'constructor.prototype.polluted': true} as any); - assert.notStrictEqual(({} as any).polluted, true); + loadPackageDefinition({ 'constructor.prototype.polluted': true } as any); + assert.notStrictEqual(({} as any).polluted, true); }); }); diff --git a/packages/grpc-js/test/test-resolver.ts b/packages/grpc-js/test/test-resolver.ts index 756d234ca..c88367285 100644 --- a/packages/grpc-js/test/test-resolver.ts +++ b/packages/grpc-js/test/test-resolver.ts @@ -19,43 +19,59 @@ // tslint:disable no-any import * as assert from 'assert'; import * as resolverManager from '../src/resolver'; +import * as resolver_dns from '../src/resolver-dns'; +import * as resolver_uds from '../src/resolver-uds'; +import * as resolver_ip from '../src/resolver-ip'; import { ServiceConfig } from '../src/service-config'; -import { StatusObject } from '../src/call-stream'; -import { SubchannelAddress, isTcpSubchannelAddress } from '../src/subchannel'; +import { StatusObject } from '../src/call-interface'; +import { + Endpoint, + SubchannelAddress, + endpointToString, + subchannelAddressEqual, +} from '../src/subchannel-address'; import { parseUri, GrpcUri } from '../src/uri-parser'; +function hasMatchingAddress( + endpointList: Endpoint[], + expectedAddress: SubchannelAddress +): boolean { + for (const endpoint of endpointList) { + for (const address of endpoint.addresses) { + if (subchannelAddressEqual(address, expectedAddress)) { + return true; + } + } + } + return false; +} + describe('Name Resolver', () => { - describe('DNS Names', function() { + before(() => { + resolver_dns.setup(); + resolver_uds.setup(); + resolver_ip.setup(); + }); + describe('DNS Names', function () { // For some reason DNS queries sometimes take a long time on Windows this.timeout(4000); - before(() => { - resolverManager.registerAll(); - }); it('Should resolve localhost properly', done => { - const target = resolverManager.mapUriDefaultScheme(parseUri('localhost:50051')!)!; + const target = resolverManager.mapUriDefaultScheme( + parseUri('localhost:50051')! + )!; const listener: resolverManager.ResolverListener = { onSuccessfulResolution: ( - addressList: SubchannelAddress[], + endpointList: Endpoint[], serviceConfig: ServiceConfig | null, serviceConfigError: StatusObject | null ) => { // Only handle the first resolution result listener.onSuccessfulResolution = () => {}; assert( - addressList.some( - addr => - isTcpSubchannelAddress(addr) && - addr.host === '127.0.0.1' && - addr.port === 50051 - ) + hasMatchingAddress(endpointList, { host: '127.0.0.1', port: 50051 }) ); assert( - addressList.some( - addr => - isTcpSubchannelAddress(addr) && - addr.host === '::1' && - addr.port === 50051 - ) + hasMatchingAddress(endpointList, { host: '::1', port: 50051 }) ); done(); }, @@ -67,31 +83,21 @@ describe('Name Resolver', () => { resolver.updateResolution(); }); it('Should default to port 443', done => { - const target = resolverManager.mapUriDefaultScheme(parseUri('localhost')!)!; + const target = resolverManager.mapUriDefaultScheme( + parseUri('localhost')! 
+ )!; const listener: resolverManager.ResolverListener = { onSuccessfulResolution: ( - addressList: SubchannelAddress[], + endpointList: Endpoint[], serviceConfig: ServiceConfig | null, serviceConfigError: StatusObject | null ) => { // Only handle the first resolution result listener.onSuccessfulResolution = () => {}; assert( - addressList.some( - addr => - isTcpSubchannelAddress(addr) && - addr.host === '127.0.0.1' && - addr.port === 443 - ) - ); - assert( - addressList.some( - addr => - isTcpSubchannelAddress(addr) && - addr.host === '::1' && - addr.port === 443 - ) + hasMatchingAddress(endpointList, { host: '127.0.0.1', port: 443 }) ); + assert(hasMatchingAddress(endpointList, { host: '::1', port: 443 })); done(); }, onError: (error: StatusObject) => { @@ -105,19 +111,14 @@ describe('Name Resolver', () => { const target = resolverManager.mapUriDefaultScheme(parseUri('1.2.3.4')!)!; const listener: resolverManager.ResolverListener = { onSuccessfulResolution: ( - addressList: SubchannelAddress[], + endpointList: Endpoint[], serviceConfig: ServiceConfig | null, serviceConfigError: StatusObject | null ) => { // Only handle the first resolution result listener.onSuccessfulResolution = () => {}; assert( - addressList.some( - addr => - isTcpSubchannelAddress(addr) && - addr.host === '1.2.3.4' && - addr.port === 443 - ) + hasMatchingAddress(endpointList, { host: '1.2.3.4', port: 443 }) ); done(); }, @@ -132,20 +133,13 @@ describe('Name Resolver', () => { const target = resolverManager.mapUriDefaultScheme(parseUri('::1')!)!; const listener: resolverManager.ResolverListener = { onSuccessfulResolution: ( - addressList: SubchannelAddress[], + endpointList: Endpoint[], serviceConfig: ServiceConfig | null, serviceConfigError: StatusObject | null ) => { // Only handle the first resolution result listener.onSuccessfulResolution = () => {}; - assert( - addressList.some( - addr => - isTcpSubchannelAddress(addr) && - addr.host === '::1' && - addr.port === 443 - ) - ); + assert(hasMatchingAddress(endpointList, { host: '::1', port: 443 })); done(); }, onError: (error: StatusObject) => { @@ -156,22 +150,19 @@ describe('Name Resolver', () => { resolver.updateResolution(); }); it('Should correctly represent a bracketed ipv6 address', done => { - const target = resolverManager.mapUriDefaultScheme(parseUri('[::1]:50051')!)!; + const target = resolverManager.mapUriDefaultScheme( + parseUri('[::1]:50051')! + )!; const listener: resolverManager.ResolverListener = { onSuccessfulResolution: ( - addressList: SubchannelAddress[], + endpointList: Endpoint[], serviceConfig: ServiceConfig | null, serviceConfigError: StatusObject | null ) => { // Only handle the first resolution result listener.onSuccessfulResolution = () => {}; assert( - addressList.some( - addr => - isTcpSubchannelAddress(addr) && - addr.host === '::1' && - addr.port === 50051 - ) + hasMatchingAddress(endpointList, { host: '::1', port: 50051 }) ); done(); }, @@ -183,16 +174,18 @@ describe('Name Resolver', () => { resolver.updateResolution(); }); it('Should resolve a public address', done => { - const target = resolverManager.mapUriDefaultScheme(parseUri('example.com')!)!; + const target = resolverManager.mapUriDefaultScheme( + parseUri('example.com')! 
+ )!; const listener: resolverManager.ResolverListener = { onSuccessfulResolution: ( - addressList: SubchannelAddress[], + endpointList: Endpoint[], serviceConfig: ServiceConfig | null, serviceConfigError: StatusObject | null ) => { // Only handle the first resolution result listener.onSuccessfulResolution = () => {}; - assert(addressList.length > 0); + assert(endpointList.length > 0); done(); }, onError: (error: StatusObject) => { @@ -202,23 +195,88 @@ describe('Name Resolver', () => { const resolver = resolverManager.createResolver(target, listener, {}); resolver.updateResolution(); }); - it('Should resolve a name with multiple dots', done => { - const target = resolverManager.mapUriDefaultScheme(parseUri('loopback4.unittest.grpc.io')!)!; + // Created DNS TXT record using TXT sample from https://github.com/grpc/proposal/blob/master/A2-service-configs-in-dns.md + // "grpc_config=[{\"serviceConfig\":{\"loadBalancingPolicy\":\"round_robin\",\"methodConfig\":[{\"name\":[{\"service\":\"MyService\",\"method\":\"Foo\"}],\"waitForReady\":true}]}}]" + it.skip('Should resolve a name with TXT service config', done => { + const target = resolverManager.mapUriDefaultScheme( + parseUri('grpctest.kleinsch.com')! + )!; const listener: resolverManager.ResolverListener = { onSuccessfulResolution: ( - addressList: SubchannelAddress[], + endpointList: Endpoint[], + serviceConfig: ServiceConfig | null, + serviceConfigError: StatusObject | null + ) => { + if (serviceConfig !== null) { + assert( + serviceConfig.loadBalancingPolicy === 'round_robin', + 'Should have found round robin LB policy' + ); + done(); + } + }, + onError: (error: StatusObject) => { + done(new Error(`Failed with status ${error.details}`)); + }, + }; + const resolver = resolverManager.createResolver(target, listener, {}); + resolver.updateResolution(); + }); + it.skip('Should not resolve TXT service config if we disabled service config', done => { + const target = resolverManager.mapUriDefaultScheme( + parseUri('grpctest.kleinsch.com')! + )!; + let count = 0; + const listener: resolverManager.ResolverListener = { + onSuccessfulResolution: ( + endpointList: Endpoint[], + serviceConfig: ServiceConfig | null, + serviceConfigError: StatusObject | null + ) => { + assert( + serviceConfig === null, + 'Should not have found service config' + ); + count++; + }, + onError: (error: StatusObject) => { + done(new Error(`Failed with status ${error.details}`)); + }, + }; + const resolver = resolverManager.createResolver(target, listener, { + 'grpc.service_config_disable_resolution': 1, + }); + resolver.updateResolution(); + setTimeout(() => { + assert(count === 1, 'Should have only resolved once'); + done(); + }, 2_000); + }); + /* The DNS entry for loopback4.unittest.grpc.io only has a single A record + * with the address 127.0.0.1, but the Mac DNS resolver appears to use + * NAT64 to create an IPv6 address in that case, so it instead returns + * 64:ff9b::7f00:1. Handling that kind of translation is outside of the + * scope of this test, so we are skipping it. The test primarily exists + * as a regression test for https://github.com/grpc/grpc-node/issues/1044, + * and the test 'Should resolve gRPC interop servers' tests the same thing. + */ + it.skip('Should resolve a name with multiple dots', done => { + const target = resolverManager.mapUriDefaultScheme( + parseUri('loopback4.unittest.grpc.io')! 
+ )!; + const listener: resolverManager.ResolverListener = { + onSuccessfulResolution: ( + endpointList: Endpoint[], serviceConfig: ServiceConfig | null, serviceConfigError: StatusObject | null ) => { // Only handle the first resolution result listener.onSuccessfulResolution = () => {}; assert( - addressList.some( - addr => - isTcpSubchannelAddress(addr) && - addr.host === '127.0.0.1' && - addr.port === 443 - ) + hasMatchingAddress(endpointList, { host: '127.0.0.1', port: 443 }), + `None of [${endpointList.map(addr => + endpointToString(addr) + )}] matched '127.0.0.1:443'` ); done(); }, @@ -232,23 +290,18 @@ describe('Name Resolver', () => { /* TODO(murgatroid99): re-enable this test, once we can get the IPv6 result * consistently */ it.skip('Should resolve a DNS name to an IPv6 address', done => { - const target = resolverManager.mapUriDefaultScheme(parseUri('loopback6.unittest.grpc.io')!)!; + const target = resolverManager.mapUriDefaultScheme( + parseUri('loopback6.unittest.grpc.io')! + )!; const listener: resolverManager.ResolverListener = { onSuccessfulResolution: ( - addressList: SubchannelAddress[], + endpointList: Endpoint[], serviceConfig: ServiceConfig | null, serviceConfigError: StatusObject | null ) => { // Only handle the first resolution result listener.onSuccessfulResolution = () => {}; - assert( - addressList.some( - addr => - isTcpSubchannelAddress(addr) && - addr.host === '::1' && - addr.port === 443 - ) - ); + assert(hasMatchingAddress(endpointList, { host: '::1', port: 443 })); done(); }, onError: (error: StatusObject) => { @@ -258,23 +311,26 @@ describe('Name Resolver', () => { const resolver = resolverManager.createResolver(target, listener, {}); resolver.updateResolution(); }); - it('Should resolve a DNS name to IPv4 and IPv6 addresses', done => { - const target = resolverManager.mapUriDefaultScheme(parseUri('loopback46.unittest.grpc.io')!)!; + /* This DNS name resolves to only the IPv4 address on Windows, and only the + * IPv6 address on Mac. There is no result that we can consistently test + * for here. */ + it.skip('Should resolve a DNS name to IPv4 and IPv6 addresses', done => { + const target = resolverManager.mapUriDefaultScheme( + parseUri('loopback46.unittest.grpc.io')! + )!; const listener: resolverManager.ResolverListener = { onSuccessfulResolution: ( - addressList: SubchannelAddress[], + endpointList: Endpoint[], serviceConfig: ServiceConfig | null, serviceConfigError: StatusObject | null ) => { // Only handle the first resolution result listener.onSuccessfulResolution = () => {}; assert( - addressList.some( - addr => - isTcpSubchannelAddress(addr) && - addr.host === '127.0.0.1' && - addr.port === 443 - ) + hasMatchingAddress(endpointList, { host: '127.0.0.1', port: 443 }), + `None of [${endpointList.map(addr => + endpointToString(addr) + )}] matched '127.0.0.1:443'` ); /* TODO(murgatroid99): check for IPv6 result, once we can get that * consistently */ @@ -290,16 +346,18 @@ describe('Name Resolver', () => { it('Should resolve a name with a hyphen', done => { /* TODO(murgatroid99): Find or create a better domain name to test this with. * This is just the first one I found with a hyphen. */ - const target = resolverManager.mapUriDefaultScheme(parseUri('network-tools.com')!)!; + const target = resolverManager.mapUriDefaultScheme( + parseUri('network-tools.com')! 
+ )!; const listener: resolverManager.ResolverListener = { onSuccessfulResolution: ( - addressList: SubchannelAddress[], + endpointList: Endpoint[], serviceConfig: ServiceConfig | null, serviceConfigError: StatusObject | null ) => { // Only handle the first resolution result listener.onSuccessfulResolution = () => {}; - assert(addressList.length > 0); + assert(endpointList.length > 0); done(); }, onError: (error: StatusObject) => { @@ -309,17 +367,25 @@ describe('Name Resolver', () => { const resolver = resolverManager.createResolver(target, listener, {}); resolver.updateResolution(); }); + /* This test also serves as a regression test for + * https://github.com/grpc/grpc-node/issues/1044, specifically handling + * hyphens and multiple periods in a DNS name. It should not be skipped + * unless there is another test for the same issue. */ it('Should resolve gRPC interop servers', done => { let completeCount = 0; - const target1 = resolverManager.mapUriDefaultScheme(parseUri('grpc-test.sandbox.googleapis.com')!)!; - const target2 = resolverManager.mapUriDefaultScheme(parseUri('grpc-test4.sandbox.googleapis.com')!)!; + const target1 = resolverManager.mapUriDefaultScheme( + parseUri('grpc-test.sandbox.googleapis.com')! + )!; + const target2 = resolverManager.mapUriDefaultScheme( + parseUri('grpc-test4.sandbox.googleapis.com')! + )!; const listener: resolverManager.ResolverListener = { onSuccessfulResolution: ( - addressList: SubchannelAddress[], + endpointList: Endpoint[], serviceConfig: ServiceConfig | null, serviceConfigError: StatusObject | null ) => { - assert(addressList.length > 0); + assert(endpointList.length > 0); completeCount += 1; if (completeCount === 2) { // Only handle the first resolution result @@ -336,23 +402,87 @@ describe('Name Resolver', () => { const resolver2 = resolverManager.createResolver(target2, listener, {}); resolver2.updateResolution(); }); + it('should not keep repeating successful resolutions', done => { + const target = resolverManager.mapUriDefaultScheme( + parseUri('localhost')! + )!; + let resultCount = 0; + const resolver = resolverManager.createResolver( + target, + { + onSuccessfulResolution: ( + endpointList: Endpoint[], + serviceConfig: ServiceConfig | null, + serviceConfigError: StatusObject | null + ) => { + assert( + hasMatchingAddress(endpointList, { host: '127.0.0.1', port: 443 }) + ); + assert( + hasMatchingAddress(endpointList, { host: '::1', port: 443 }) + ); + resultCount += 1; + if (resultCount === 1) { + process.nextTick(() => resolver.updateResolution()); + } + }, + onError: (error: StatusObject) => { + assert.ifError(error); + }, + }, + { 'grpc.dns_min_time_between_resolutions_ms': 2000 } + ); + resolver.updateResolution(); + setTimeout(() => { + assert.strictEqual(resultCount, 2, `resultCount ${resultCount} !== 2`); + done(); + }, 10_000); + }).timeout(15_000); + it('should not keep repeating failed resolutions', done => { + const target = resolverManager.mapUriDefaultScheme( + parseUri('host.invalid')! 
+ )!; + let resultCount = 0; + const resolver = resolverManager.createResolver( + target, + { + onSuccessfulResolution: ( + endpointList: Endpoint[], + serviceConfig: ServiceConfig | null, + serviceConfigError: StatusObject | null + ) => { + assert.fail('Resolution succeeded unexpectedly'); + }, + onError: (error: StatusObject) => { + resultCount += 1; + if (resultCount === 1) { + process.nextTick(() => resolver.updateResolution()); + } + }, + }, + {} + ); + resolver.updateResolution(); + setTimeout(() => { + assert.strictEqual(resultCount, 2, `resultCount ${resultCount} !== 2`); + done(); + }, 10_000); + }).timeout(15_000); }); describe('UDS Names', () => { it('Should handle a relative Unix Domain Socket name', done => { - const target = resolverManager.mapUriDefaultScheme(parseUri('unix:socket')!)!; + const target = resolverManager.mapUriDefaultScheme( + parseUri('unix:socket')! + )!; const listener: resolverManager.ResolverListener = { onSuccessfulResolution: ( - addressList: SubchannelAddress[], + endpointList: Endpoint[], serviceConfig: ServiceConfig | null, serviceConfigError: StatusObject | null ) => { // Only handle the first resolution result listener.onSuccessfulResolution = () => {}; - assert( - addressList.some( - addr => !isTcpSubchannelAddress(addr) && addr.path === 'socket' - ) - ); + assert(hasMatchingAddress(endpointList, { path: 'socket' })); done(); }, onError: (error: StatusObject) => { @@ -363,21 +493,18 @@ describe('Name Resolver', () => { resolver.updateResolution(); }); it('Should handle an absolute Unix Domain Socket name', done => { - const target = resolverManager.mapUriDefaultScheme(parseUri('unix:///tmp/socket')!)!; + const target = resolverManager.mapUriDefaultScheme( + parseUri('unix:///tmp/socket')! + )!; const listener: resolverManager.ResolverListener = { onSuccessfulResolution: ( - addressList: SubchannelAddress[], + endpointList: Endpoint[], serviceConfig: ServiceConfig | null, serviceConfigError: StatusObject | null ) => { // Only handle the first resolution result listener.onSuccessfulResolution = () => {}; - assert( - addressList.some( - addr => - !isTcpSubchannelAddress(addr) && addr.path === '/tmp/socket' - ) - ); + assert(hasMatchingAddress(endpointList, { path: '/tmp/socket' })); done(); }, onError: (error: StatusObject) => { @@ -390,22 +517,19 @@ describe('Name Resolver', () => { }); describe('IP Addresses', () => { it('should handle one IPv4 address with no port', done => { - const target = resolverManager.mapUriDefaultScheme(parseUri('ipv4:127.0.0.1')!)!; + const target = resolverManager.mapUriDefaultScheme( + parseUri('ipv4:127.0.0.1')! + )!; const listener: resolverManager.ResolverListener = { onSuccessfulResolution: ( - addressList: SubchannelAddress[], + endpointList: Endpoint[], serviceConfig: ServiceConfig | null, serviceConfigError: StatusObject | null ) => { // Only handle the first resolution result listener.onSuccessfulResolution = () => {}; assert( - addressList.some( - addr => - isTcpSubchannelAddress(addr) && - addr.host === '127.0.0.1' && - addr.port === 443 - ) + hasMatchingAddress(endpointList, { host: '127.0.0.1', port: 443 }) ); done(); }, @@ -417,22 +541,19 @@ describe('Name Resolver', () => { resolver.updateResolution(); }); it('should handle one IPv4 address with a port', done => { - const target = resolverManager.mapUriDefaultScheme(parseUri('ipv4:127.0.0.1:50051')!)!; + const target = resolverManager.mapUriDefaultScheme( + parseUri('ipv4:127.0.0.1:50051')! 
+ )!; const listener: resolverManager.ResolverListener = { onSuccessfulResolution: ( - addressList: SubchannelAddress[], + endpointList: Endpoint[], serviceConfig: ServiceConfig | null, serviceConfigError: StatusObject | null ) => { // Only handle the first resolution result listener.onSuccessfulResolution = () => {}; assert( - addressList.some( - addr => - isTcpSubchannelAddress(addr) && - addr.host === '127.0.0.1' && - addr.port === 50051 - ) + hasMatchingAddress(endpointList, { host: '127.0.0.1', port: 50051 }) ); done(); }, @@ -444,30 +565,22 @@ describe('Name Resolver', () => { resolver.updateResolution(); }); it('should handle multiple IPv4 addresses with different ports', done => { - const target = resolverManager.mapUriDefaultScheme(parseUri('ipv4:127.0.0.1:50051,127.0.0.1:50052')!)!; + const target = resolverManager.mapUriDefaultScheme( + parseUri('ipv4:127.0.0.1:50051,127.0.0.1:50052')! + )!; const listener: resolverManager.ResolverListener = { onSuccessfulResolution: ( - addressList: SubchannelAddress[], + endpointList: Endpoint[], serviceConfig: ServiceConfig | null, serviceConfigError: StatusObject | null ) => { // Only handle the first resolution result listener.onSuccessfulResolution = () => {}; assert( - addressList.some( - addr => - isTcpSubchannelAddress(addr) && - addr.host === '127.0.0.1' && - addr.port === 50051 - ) + hasMatchingAddress(endpointList, { host: '127.0.0.1', port: 50051 }) ); assert( - addressList.some( - addr => - isTcpSubchannelAddress(addr) && - addr.host === '127.0.0.1' && - addr.port === 50052 - ) + hasMatchingAddress(endpointList, { host: '127.0.0.1', port: 50052 }) ); done(); }, @@ -479,23 +592,18 @@ describe('Name Resolver', () => { resolver.updateResolution(); }); it('should handle one IPv6 address with no port', done => { - const target = resolverManager.mapUriDefaultScheme(parseUri('ipv6:::1')!)!; + const target = resolverManager.mapUriDefaultScheme( + parseUri('ipv6:::1')! + )!; const listener: resolverManager.ResolverListener = { onSuccessfulResolution: ( - addressList: SubchannelAddress[], + endpointList: Endpoint[], serviceConfig: ServiceConfig | null, serviceConfigError: StatusObject | null ) => { // Only handle the first resolution result listener.onSuccessfulResolution = () => {}; - assert( - addressList.some( - addr => - isTcpSubchannelAddress(addr) && - addr.host === '::1' && - addr.port === 443 - ) - ); + assert(hasMatchingAddress(endpointList, { host: '::1', port: 443 })); done(); }, onError: (error: StatusObject) => { @@ -506,22 +614,19 @@ describe('Name Resolver', () => { resolver.updateResolution(); }); it('should handle one IPv6 address with a port', done => { - const target = resolverManager.mapUriDefaultScheme(parseUri('ipv6:[::1]:50051')!)!; + const target = resolverManager.mapUriDefaultScheme( + parseUri('ipv6:[::1]:50051')! 
+ )!; const listener: resolverManager.ResolverListener = { onSuccessfulResolution: ( - addressList: SubchannelAddress[], + endpointList: Endpoint[], serviceConfig: ServiceConfig | null, serviceConfigError: StatusObject | null ) => { // Only handle the first resolution result listener.onSuccessfulResolution = () => {}; assert( - addressList.some( - addr => - isTcpSubchannelAddress(addr) && - addr.host === '::1' && - addr.port === 50051 - ) + hasMatchingAddress(endpointList, { host: '::1', port: 50051 }) ); done(); }, @@ -533,30 +638,22 @@ describe('Name Resolver', () => { resolver.updateResolution(); }); it('should handle multiple IPv6 addresses with different ports', done => { - const target = resolverManager.mapUriDefaultScheme(parseUri('ipv6:[::1]:50051,[::1]:50052')!)!; + const target = resolverManager.mapUriDefaultScheme( + parseUri('ipv6:[::1]:50051,[::1]:50052')! + )!; const listener: resolverManager.ResolverListener = { onSuccessfulResolution: ( - addressList: SubchannelAddress[], + endpointList: Endpoint[], serviceConfig: ServiceConfig | null, serviceConfigError: StatusObject | null ) => { // Only handle the first resolution result listener.onSuccessfulResolution = () => {}; assert( - addressList.some( - addr => - isTcpSubchannelAddress(addr) && - addr.host === '::1' && - addr.port === 50051 - ) + hasMatchingAddress(endpointList, { host: '::1', port: 50051 }) ); assert( - addressList.some( - addr => - isTcpSubchannelAddress(addr) && - addr.host === '::1' && - addr.port === 50052 - ) + hasMatchingAddress(endpointList, { host: '::1', port: 50052 }) ); done(); }, @@ -574,8 +671,7 @@ describe('Name Resolver', () => { return []; } - destroy() { - } + destroy() {} static getDefaultAuthority(target: GrpcUri): string { return 'other'; @@ -584,7 +680,9 @@ describe('Name Resolver', () => { it('Should return the correct authority if a different resolver has been registered', () => { resolverManager.registerResolver('other', OtherResolver); - const target = resolverManager.mapUriDefaultScheme(parseUri('other:name')!)!; + const target = resolverManager.mapUriDefaultScheme( + parseUri('other:name')! + )!; console.log(target); const authority = resolverManager.getDefaultAuthority(target); diff --git a/packages/grpc-js/test/test-retry-config.ts b/packages/grpc-js/test/test-retry-config.ts new file mode 100644 index 000000000..77952e668 --- /dev/null +++ b/packages/grpc-js/test/test-retry-config.ts @@ -0,0 +1,316 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +import assert = require('assert'); +import { validateServiceConfig } from '../src/service-config'; + +function createRetryServiceConfig(retryConfig: object): object { + return { + loadBalancingConfig: [], + methodConfig: [ + { + name: [ + { + service: 'A', + method: 'B', + }, + ], + + retryPolicy: retryConfig, + }, + ], + }; +} + +function createHedgingServiceConfig(hedgingConfig: object): object { + return { + loadBalancingConfig: [], + methodConfig: [ + { + name: [ + { + service: 'A', + method: 'B', + }, + ], + + hedgingPolicy: hedgingConfig, + }, + ], + }; +} + +function createThrottlingServiceConfig(retryThrottling: object): object { + return { + loadBalancingConfig: [], + methodConfig: [], + retryThrottling: retryThrottling, + }; +} + +interface TestCase { + description: string; + config: object; + error: RegExp; +} + +const validRetryConfig = { + maxAttempts: 2, + initialBackoff: '1s', + maxBackoff: '1s', + backoffMultiplier: 1, + retryableStatusCodes: [14, 'RESOURCE_EXHAUSTED'], +}; + +const RETRY_TEST_CASES: TestCase[] = [ + { + description: 'omitted maxAttempts', + config: { + initialBackoff: '1s', + maxBackoff: '1s', + backoffMultiplier: 1, + retryableStatusCodes: [14], + }, + error: /retry policy: maxAttempts must be an integer at least 2/, + }, + { + description: 'a low maxAttempts', + config: { ...validRetryConfig, maxAttempts: 1 }, + error: /retry policy: maxAttempts must be an integer at least 2/, + }, + { + description: 'omitted initialBackoff', + config: { + maxAttempts: 2, + maxBackoff: '1s', + backoffMultiplier: 1, + retryableStatusCodes: [14], + }, + error: + /retry policy: initialBackoff must be a string consisting of a positive integer followed by s/, + }, + { + description: 'a non-numeric initialBackoff', + config: { ...validRetryConfig, initialBackoff: 'abcs' }, + error: + /retry policy: initialBackoff must be a string consisting of a positive integer followed by s/, + }, + { + description: 'an initialBackoff without an s', + config: { ...validRetryConfig, initialBackoff: '123' }, + error: + /retry policy: initialBackoff must be a string consisting of a positive integer followed by s/, + }, + { + description: 'omitted maxBackoff', + config: { + maxAttempts: 2, + initialBackoff: '1s', + backoffMultiplier: 1, + retryableStatusCodes: [14], + }, + error: + /retry policy: maxBackoff must be a string consisting of a positive integer followed by s/, + }, + { + description: 'a non-numeric maxBackoff', + config: { ...validRetryConfig, maxBackoff: 'abcs' }, + error: + /retry policy: maxBackoff must be a string consisting of a positive integer followed by s/, + }, + { + description: 'an maxBackoff without an s', + config: { ...validRetryConfig, maxBackoff: '123' }, + error: + /retry policy: maxBackoff must be a string consisting of a positive integer followed by s/, + }, + { + description: 'omitted backoffMultiplier', + config: { + maxAttempts: 2, + initialBackoff: '1s', + maxBackoff: '1s', + retryableStatusCodes: [14], + }, + error: /retry policy: backoffMultiplier must be a number greater than 0/, + }, + { + description: 'a negative backoffMultiplier', + config: { ...validRetryConfig, backoffMultiplier: -1 }, + error: /retry policy: backoffMultiplier must be a number greater than 0/, + }, + { + description: 'omitted retryableStatusCodes', + config: { + maxAttempts: 2, + initialBackoff: '1s', + maxBackoff: '1s', + backoffMultiplier: 1, + }, + error: /retry policy: retryableStatusCodes is required/, + }, + { + description: 'empty retryableStatusCodes', + config: { 
...validRetryConfig, retryableStatusCodes: [] }, + error: /retry policy: retryableStatusCodes must be non-empty/, + }, + { + description: 'unknown status code name', + config: { ...validRetryConfig, retryableStatusCodes: ['abcd'] }, + error: /retry policy: retryableStatusCodes value not a status code name/, + }, + { + description: 'out of range status code number', + config: { ...validRetryConfig, retryableStatusCodes: [12345] }, + error: /retry policy: retryableStatusCodes value not in status code range/, + }, +]; + +const validHedgingConfig = { + maxAttempts: 2, +}; + +const HEDGING_TEST_CASES: TestCase[] = [ + { + description: 'omitted maxAttempts', + config: {}, + error: /hedging policy: maxAttempts must be an integer at least 2/, + }, + { + description: 'a low maxAttempts', + config: { ...validHedgingConfig, maxAttempts: 1 }, + error: /hedging policy: maxAttempts must be an integer at least 2/, + }, + { + description: 'a non-numeric hedgingDelay', + config: { ...validHedgingConfig, hedgingDelay: 'abcs' }, + error: + /hedging policy: hedgingDelay must be a string consisting of a positive integer followed by s/, + }, + { + description: 'a hedgingDelay without an s', + config: { ...validHedgingConfig, hedgingDelay: '123' }, + error: + /hedging policy: hedgingDelay must be a string consisting of a positive integer followed by s/, + }, + { + description: 'unknown status code name', + config: { ...validHedgingConfig, nonFatalStatusCodes: ['abcd'] }, + error: /hedging policy: nonFatalStatusCodes value not a status code name/, + }, + { + description: 'out of range status code number', + config: { ...validHedgingConfig, nonFatalStatusCodes: [12345] }, + error: /hedging policy: nonFatalStatusCodes value not in status code range/, + }, +]; + +const validThrottlingConfig = { + maxTokens: 100, + tokenRatio: 0.1, +}; + +const THROTTLING_TEST_CASES: TestCase[] = [ + { + description: 'omitted maxTokens', + config: { tokenRatio: 0.1 }, + error: /retryThrottling: maxTokens must be a number in \(0, 1000\]/, + }, + { + description: 'a large maxTokens', + config: { ...validThrottlingConfig, maxTokens: 1001 }, + error: /retryThrottling: maxTokens must be a number in \(0, 1000\]/, + }, + { + description: 'zero maxTokens', + config: { ...validThrottlingConfig, maxTokens: 0 }, + error: /retryThrottling: maxTokens must be a number in \(0, 1000\]/, + }, + { + description: 'omitted tokenRatio', + config: { maxTokens: 100 }, + error: /retryThrottling: tokenRatio must be a number greater than 0/, + }, + { + description: 'zero tokenRatio', + config: { ...validThrottlingConfig, tokenRatio: 0 }, + error: /retryThrottling: tokenRatio must be a number greater than 0/, + }, +]; + +describe('Retry configs', () => { + describe('Retry', () => { + it('Should accept a valid config', () => { + assert.doesNotThrow(() => { + validateServiceConfig(createRetryServiceConfig(validRetryConfig)); + }); + }); + for (const testCase of RETRY_TEST_CASES) { + it(`Should reject ${testCase.description}`, () => { + assert.throws(() => { + validateServiceConfig(createRetryServiceConfig(testCase.config)); + }, testCase.error); + }); + } + }); + describe('Hedging', () => { + it('Should accept valid configs', () => { + assert.doesNotThrow(() => { + validateServiceConfig(createHedgingServiceConfig(validHedgingConfig)); + }); + assert.doesNotThrow(() => { + validateServiceConfig( + createHedgingServiceConfig({ + ...validHedgingConfig, + hedgingDelay: '1s', + }) + ); + }); + assert.doesNotThrow(() => { + validateServiceConfig( + 
createHedgingServiceConfig({ + ...validHedgingConfig, + nonFatalStatusCodes: [14, 'RESOURCE_EXHAUSTED'], + }) + ); + }); + }); + for (const testCase of HEDGING_TEST_CASES) { + it(`Should reject ${testCase.description}`, () => { + assert.throws(() => { + validateServiceConfig(createHedgingServiceConfig(testCase.config)); + }, testCase.error); + }); + } + }); + describe('Throttling', () => { + it('Should accept a valid config', () => { + assert.doesNotThrow(() => { + validateServiceConfig( + createThrottlingServiceConfig(validThrottlingConfig) + ); + }); + }); + for (const testCase of THROTTLING_TEST_CASES) { + it(`Should reject ${testCase.description}`, () => { + assert.throws(() => { + validateServiceConfig(createThrottlingServiceConfig(testCase.config)); + }, testCase.error); + }); + } + }); +}); diff --git a/packages/grpc-js/test/test-retry.ts b/packages/grpc-js/test/test-retry.ts new file mode 100644 index 000000000..e66e96eb0 --- /dev/null +++ b/packages/grpc-js/test/test-retry.ts @@ -0,0 +1,408 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import * as assert from 'assert'; +import * as path from 'path'; +import * as grpc from '../src'; +import { loadProtoFile } from './common'; + +const protoFile = path.join(__dirname, 'fixtures', 'echo_service.proto'); +const EchoService = loadProtoFile(protoFile) + .EchoService as grpc.ServiceClientConstructor; + +const serviceImpl = { + echo: ( + call: grpc.ServerUnaryCall, + callback: grpc.sendUnaryData + ) => { + const succeedOnRetryAttempt = call.metadata.get('succeed-on-retry-attempt'); + const previousAttempts = call.metadata.get('grpc-previous-rpc-attempts'); + if ( + succeedOnRetryAttempt.length === 0 || + (previousAttempts.length > 0 && + previousAttempts[0] === succeedOnRetryAttempt[0]) + ) { + callback(null, call.request); + } else { + const statusCode = call.metadata.get('respond-with-status'); + const code = statusCode[0] + ? Number.parseInt(statusCode[0] as string) + : grpc.status.UNKNOWN; + callback({ + code: code, + details: `Failed on retry ${previousAttempts[0] ?? 
0}`, + }); + } + }, +}; + +describe('Retries', () => { + let server: grpc.Server; + let port: number; + before(done => { + server = new grpc.Server(); + server.addService(EchoService.service, serviceImpl); + server.bindAsync( + 'localhost:0', + grpc.ServerCredentials.createInsecure(), + (error, portNumber) => { + if (error) { + done(error); + return; + } + port = portNumber; + server.start(); + done(); + } + ); + }); + + after(() => { + server.forceShutdown(); + }); + + describe('Client with retries disabled', () => { + let client: InstanceType<typeof EchoService>; + before(() => { + client = new EchoService( + `localhost:${port}`, + grpc.credentials.createInsecure(), + { 'grpc.enable_retries': 0 } + ); + }); + + after(() => { + client.close(); + }); + + it('Should be able to make a basic request', done => { + client.echo( + { value: 'test value', value2: 3 }, + (error: grpc.ServiceError, response: any) => { + assert.ifError(error); + assert.deepStrictEqual(response, { value: 'test value', value2: 3 }); + done(); + } + ); + }); + + it('Should fail if the server fails the first request', done => { + const metadata = new grpc.Metadata(); + metadata.set('succeed-on-retry-attempt', '1'); + client.echo( + { value: 'test value', value2: 3 }, + metadata, + (error: grpc.ServiceError, response: any) => { + assert(error); + assert.strictEqual(error.details, 'Failed on retry 0'); + done(); + } + ); + }); + }); + + describe('Client with retries enabled but not configured', () => { + let client: InstanceType<typeof EchoService>; + before(() => { + client = new EchoService( + `localhost:${port}`, + grpc.credentials.createInsecure() + ); + }); + + after(() => { + client.close(); + }); + + it('Should be able to make a basic request', done => { + client.echo( + { value: 'test value', value2: 3 }, + (error: grpc.ServiceError, response: any) => { + assert.ifError(error); + assert.deepStrictEqual(response, { value: 'test value', value2: 3 }); + done(); + } + ); + }); + + it('Should fail if the server fails the first request', done => { + const metadata = new grpc.Metadata(); + metadata.set('succeed-on-retry-attempt', '1'); + client.echo( + { value: 'test value', value2: 3 }, + metadata, + (error: grpc.ServiceError, response: any) => { + assert(error); + assert.strictEqual(error.details, 'Failed on retry 0'); + done(); + } + ); + }); + }); + + describe('Client with retries configured', () => { + let client: InstanceType<typeof EchoService>; + before(() => { + const serviceConfig = { + loadBalancingConfig: [], + methodConfig: [ + { + name: [ + { + service: 'EchoService', + }, + ], + retryPolicy: { + maxAttempts: 3, + initialBackoff: '0.1s', + maxBackoff: '10s', + backoffMultiplier: 1.2, + retryableStatusCodes: [14, 'RESOURCE_EXHAUSTED'], + }, + }, + ], + }; + client = new EchoService( + `localhost:${port}`, + grpc.credentials.createInsecure(), + { 'grpc.service_config': JSON.stringify(serviceConfig) } + ); + }); + + after(() => { + client.close(); + }); + + it('Should be able to make a basic request', done => { + client.echo( + { value: 'test value', value2: 3 }, + (error: grpc.ServiceError, response: any) => { + assert.ifError(error); + assert.deepStrictEqual(response, { value: 'test value', value2: 3 }); + done(); + } + ); + }); + + it('Should succeed with few required attempts', done => { + const metadata = new grpc.Metadata(); + metadata.set('succeed-on-retry-attempt', '2'); + metadata.set('respond-with-status', `${grpc.status.RESOURCE_EXHAUSTED}`); + client.echo( + { value: 'test value', value2: 3 }, + metadata, + (error: grpc.ServiceError, response: any) => { 
assert.ifError(error); + assert.deepStrictEqual(response, { value: 'test value', value2: 3 }); + done(); + } + ); + }); + + it('Should fail with many required attempts', done => { + const metadata = new grpc.Metadata(); + metadata.set('succeed-on-retry-attempt', '4'); + metadata.set('respond-with-status', `${grpc.status.RESOURCE_EXHAUSTED}`); + client.echo( + { value: 'test value', value2: 3 }, + metadata, + (error: grpc.ServiceError, response: any) => { + assert(error); + assert.strictEqual(error.details, 'Failed on retry 2'); + done(); + } + ); + }); + + it('Should fail with a fatal status code', done => { + const metadata = new grpc.Metadata(); + metadata.set('succeed-on-retry-attempt', '2'); + metadata.set('respond-with-status', `${grpc.status.NOT_FOUND}`); + client.echo( + { value: 'test value', value2: 3 }, + metadata, + (error: grpc.ServiceError, response: any) => { + assert(error); + assert.strictEqual(error.details, 'Failed on retry 0'); + done(); + } + ); + }); + + it('Should not be able to make more than 5 attempts', done => { + const serviceConfig = { + loadBalancingConfig: [], + methodConfig: [ + { + name: [ + { + service: 'EchoService', + }, + ], + retryPolicy: { + maxAttempts: 10, + initialBackoff: '0.1s', + maxBackoff: '10s', + backoffMultiplier: 1.2, + retryableStatusCodes: [14, 'RESOURCE_EXHAUSTED'], + }, + }, + ], + }; + const client2 = new EchoService( + `localhost:${port}`, + grpc.credentials.createInsecure(), + { 'grpc.service_config': JSON.stringify(serviceConfig) } + ); + const metadata = new grpc.Metadata(); + metadata.set('succeed-on-retry-attempt', '6'); + metadata.set('respond-with-status', `${grpc.status.RESOURCE_EXHAUSTED}`); + client2.echo( + { value: 'test value', value2: 3 }, + metadata, + (error: grpc.ServiceError, response: any) => { + assert(error); + assert.strictEqual(error.details, 'Failed on retry 4'); + done(); + } + ); + }); + }); + + describe('Client with hedging configured', () => { + let client: InstanceType; + before(() => { + const serviceConfig = { + loadBalancingConfig: [], + methodConfig: [ + { + name: [ + { + service: 'EchoService', + }, + ], + hedgingPolicy: { + maxAttempts: 3, + nonFatalStatusCodes: [14, 'RESOURCE_EXHAUSTED'], + }, + }, + ], + }; + client = new EchoService( + `localhost:${port}`, + grpc.credentials.createInsecure(), + { 'grpc.service_config': JSON.stringify(serviceConfig) } + ); + }); + + after(() => { + client.close(); + }); + + it('Should be able to make a basic request', done => { + client.echo( + { value: 'test value', value2: 3 }, + (error: grpc.ServiceError, response: any) => { + assert.ifError(error); + assert.deepStrictEqual(response, { value: 'test value', value2: 3 }); + done(); + } + ); + }); + + it('Should succeed with few required attempts', done => { + const metadata = new grpc.Metadata(); + metadata.set('succeed-on-retry-attempt', '2'); + metadata.set('respond-with-status', `${grpc.status.RESOURCE_EXHAUSTED}`); + client.echo( + { value: 'test value', value2: 3 }, + metadata, + (error: grpc.ServiceError, response: any) => { + assert.ifError(error); + assert.deepStrictEqual(response, { value: 'test value', value2: 3 }); + done(); + } + ); + }); + + it('Should fail with many required attempts', done => { + const metadata = new grpc.Metadata(); + metadata.set('succeed-on-retry-attempt', '4'); + metadata.set('respond-with-status', `${grpc.status.RESOURCE_EXHAUSTED}`); + client.echo( + { value: 'test value', value2: 3 }, + metadata, + (error: grpc.ServiceError, response: any) => { + assert(error); + 
assert(error.details.startsWith('Failed on retry')); + done(); + } + ); + }); + + it('Should fail with a fatal status code', done => { + const metadata = new grpc.Metadata(); + metadata.set('succeed-on-retry-attempt', '2'); + metadata.set('respond-with-status', `${grpc.status.NOT_FOUND}`); + client.echo( + { value: 'test value', value2: 3 }, + metadata, + (error: grpc.ServiceError, response: any) => { + assert(error); + assert(error.details.startsWith('Failed on retry')); + done(); + } + ); + }); + + it('Should not be able to make more than 5 attempts', done => { + const serviceConfig = { + loadBalancingConfig: [], + methodConfig: [ + { + name: [ + { + service: 'EchoService', + }, + ], + hedgingPolicy: { + maxAttempts: 10, + nonFatalStatusCodes: [14, 'RESOURCE_EXHAUSTED'], + }, + }, + ], + }; + const client2 = new EchoService( + `localhost:${port}`, + grpc.credentials.createInsecure(), + { 'grpc.service_config': JSON.stringify(serviceConfig) } + ); + const metadata = new grpc.Metadata(); + metadata.set('succeed-on-retry-attempt', '6'); + metadata.set('respond-with-status', `${grpc.status.RESOURCE_EXHAUSTED}`); + client2.echo( + { value: 'test value', value2: 3 }, + metadata, + (error: grpc.ServiceError, response: any) => { + assert(error); + assert(error.details.startsWith('Failed on retry')); + done(); + } + ); + }); + }); +}); diff --git a/packages/grpc-js/test/test-server-deadlines.ts b/packages/grpc-js/test/test-server-deadlines.ts index c1152309d..43e39808a 100644 --- a/packages/grpc-js/test/test-server-deadlines.ts +++ b/packages/grpc-js/test/test-server-deadlines.ts @@ -42,7 +42,8 @@ describe('Server deadlines', () => { before(done => { const protoFile = path.join(__dirname, 'fixtures', 'test_service.proto'); const testServiceDef = loadProtoFile(protoFile); - const testServiceClient = testServiceDef.TestService as ServiceClientConstructor; + const testServiceClient = + testServiceDef.TestService as ServiceClientConstructor; server = new Server(); server.addService(testServiceClient.service, { @@ -109,8 +110,8 @@ describe('Server deadlines', () => { {}, (error: any, response: any) => { assert(error); - assert.strictEqual(error.code, grpc.status.OUT_OF_RANGE); - assert.strictEqual(error.details, 'Invalid deadline'); + assert.strictEqual(error.code, grpc.status.INTERNAL); + assert.match(error.details, /^Invalid grpc-timeout value/); done(); } ); @@ -126,7 +127,8 @@ describe('Cancellation', () => { before(done => { const protoFile = path.join(__dirname, 'fixtures', 'test_service.proto'); const testServiceDef = loadProtoFile(protoFile); - const testServiceClient = testServiceDef.TestService as ServiceClientConstructor; + const testServiceClient = + testServiceDef.TestService as ServiceClientConstructor; server = new Server(); server.addService(testServiceClient.service, { diff --git a/packages/grpc-js/test/test-server-errors.ts b/packages/grpc-js/test/test-server-errors.ts index 91b7c196c..243e10918 100644 --- a/packages/grpc-js/test/test-server-errors.ts +++ b/packages/grpc-js/test/test-server-errors.ts @@ -33,10 +33,12 @@ import { } from '../src/server-call'; import { loadProtoFile } from './common'; +import { CompressionAlgorithms } from '../src/compression-algorithms'; const protoFile = join(__dirname, 'fixtures', 'test_service.proto'); const testServiceDef = loadProtoFile(protoFile); -const testServiceClient = testServiceDef.TestService as ServiceClientConstructor; +const testServiceClient = + testServiceDef.TestService as ServiceClientConstructor; const clientInsecureCreds = 
grpc.credentials.createInsecure(); const serverInsecureCreds = grpc.ServerCredentials.createInsecure(); @@ -309,7 +311,7 @@ describe('Other conditions', () => { trailerMetadata ); } else { - cb(null, { count: 1 }, trailerMetadata); + cb(null, { count: 1, message: 'a'.repeat(req.responseLength) }, trailerMetadata); } }, @@ -319,6 +321,7 @@ describe('Other conditions', () => { ) { let count = 0; let errored = false; + let responseLength = 0; stream.on('data', (data: any) => { if (data.error) { @@ -326,13 +329,14 @@ describe('Other conditions', () => { errored = true; cb(new Error(message) as ServiceError, null, trailerMetadata); } else { + responseLength += data.responseLength; count++; } }); stream.on('end', () => { if (!errored) { - cb(null, { count }, trailerMetadata); + cb(null, { count, message: 'a'.repeat(responseLength) }, trailerMetadata); } }); }, @@ -348,7 +352,7 @@ describe('Other conditions', () => { }); } else { for (let i = 1; i <= 5; i++) { - stream.write({ count: i }); + stream.write({ count: i, message: 'a'.repeat(req.responseLength) }); if (req.errorAfter && req.errorAfter === i) { stream.emit('error', { code: grpc.status.UNKNOWN, @@ -375,7 +379,7 @@ describe('Other conditions', () => { err.metadata.add('count', '' + count); stream.emit('error', err); } else { - stream.write({ count }); + stream.write({ count, message: 'a'.repeat(data.responseLength) }); count++; } }); @@ -723,7 +727,7 @@ describe('Other conditions', () => { }); describe('should handle server stream errors correctly', () => { - it('should emit data for all messages before error', (done) => { + it('should emit data for all messages before error', done => { const expectedDataCount = 2; const call = client.serverStream({ errorAfter: expectedDataCount }); @@ -739,6 +743,44 @@ describe('Other conditions', () => { }); }); }); + + describe('Max message size', () => { + const largeMessage = 'a'.repeat(10_000_000); + it('Should be enforced on the server', done => { + client.unary({ message: largeMessage }, (error?: ServiceError) => { + assert(error); + assert.strictEqual(error.code, grpc.status.RESOURCE_EXHAUSTED); + done(); + }); + }); + it('Should be enforced on the client', done => { + client.unary({ responseLength: 10_000_000 }, (error?: ServiceError) => { + assert(error); + assert.strictEqual(error.code, grpc.status.RESOURCE_EXHAUSTED); + done(); + }); + }); + describe('Compressed messages', () => { + it('Should be enforced with gzip', done => { + const compressingClient = new testServiceClient(`localhost:${port}`, clientInsecureCreds, {'grpc.default_compression_algorithm': CompressionAlgorithms.gzip}); + compressingClient.unary({ message: largeMessage }, (error?: ServiceError) => { + assert(error); + assert.strictEqual(error.code, grpc.status.RESOURCE_EXHAUSTED); + assert.match(error.details, /Received message that decompresses to a size larger/); + done(); + }); + }); + it('Should be enforced with deflate', done => { + const compressingClient = new testServiceClient(`localhost:${port}`, clientInsecureCreds, {'grpc.default_compression_algorithm': CompressionAlgorithms.deflate}); + compressingClient.unary({ message: largeMessage }, (error?: ServiceError) => { + assert(error); + assert.strictEqual(error.code, grpc.status.RESOURCE_EXHAUSTED); + assert.match(error.details, /Received message that decompresses to a size larger/); + done(); + }); + }); + }); + }); }); function identity(arg: any): any { diff --git a/packages/grpc-js/test/test-server-interceptors.ts b/packages/grpc-js/test/test-server-interceptors.ts 
new file mode 100644 index 000000000..5d4038599 --- /dev/null +++ b/packages/grpc-js/test/test-server-interceptors.ts @@ -0,0 +1,340 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import * as assert from 'assert'; +import * as path from 'path'; +import * as grpc from '../src'; +import { TestClient, loadProtoFile } from './common'; + +const protoFile = path.join(__dirname, 'fixtures', 'echo_service.proto'); +const echoService = loadProtoFile(protoFile) + .EchoService as grpc.ServiceClientConstructor; + +const AUTH_HEADER_KEY = 'auth'; +const AUTH_HEADER_ALLOWED_VALUE = 'allowed'; +const testAuthInterceptor: grpc.ServerInterceptor = ( + methodDescriptor, + call +) => { + const authListener = (new grpc.ServerListenerBuilder()) + .withOnReceiveMetadata((metadata, mdNext) => { + if ( + metadata.get(AUTH_HEADER_KEY)?.[0] !== AUTH_HEADER_ALLOWED_VALUE + ) { + call.sendStatus({ + code: grpc.status.UNAUTHENTICATED, + details: 'Auth metadata not correct', + }); + } else { + mdNext(metadata); + } + }).build(); + const responder = (new grpc.ResponderBuilder()) + .withStart(next => next(authListener)).build(); + return new grpc.ServerInterceptingCall(call, responder); +}; + +let eventCounts = { + receiveMetadata: 0, + receiveMessage: 0, + receiveHalfClose: 0, + sendMetadata: 0, + sendMessage: 0, + sendStatus: 0, +}; + +function resetEventCounts() { + eventCounts = { + receiveMetadata: 0, + receiveMessage: 0, + receiveHalfClose: 0, + sendMetadata: 0, + sendMessage: 0, + sendStatus: 0, + }; +} + +/** + * Test interceptor to verify that interceptors see each expected event by + * counting each kind of event. 
+ * @param methodDescription + * @param call + */ +const testLoggingInterceptor: grpc.ServerInterceptor = ( + methodDescription, + call +) => { + return new grpc.ServerInterceptingCall(call, { + start: next => { + next({ + onReceiveMetadata: (metadata, mdNext) => { + eventCounts.receiveMetadata += 1; + mdNext(metadata); + }, + onReceiveMessage: (message, messageNext) => { + eventCounts.receiveMessage += 1; + messageNext(message); + }, + onReceiveHalfClose: hcNext => { + eventCounts.receiveHalfClose += 1; + hcNext(); + }, + }); + }, + sendMetadata: (metadata, mdNext) => { + eventCounts.sendMetadata += 1; + mdNext(metadata); + }, + sendMessage: (message, messageNext) => { + eventCounts.sendMessage += 1; + messageNext(message); + }, + sendStatus: (status, statusNext) => { + eventCounts.sendStatus += 1; + statusNext(status); + }, + }); +}; + +const testHeaderInjectionInterceptor: grpc.ServerInterceptor = ( + methodDescriptor, + call +) => { + return new grpc.ServerInterceptingCall(call, { + start: next => { + const authListener: grpc.ServerListener = { + onReceiveMetadata: (metadata, mdNext) => { + metadata.set('injected-header', 'present'); + mdNext(metadata); + }, + }; + next(authListener); + }, + }); +}; + +describe('Server interceptors', () => { + describe('Auth-type interceptor', () => { + let server: grpc.Server; + let client: TestClient; + /* Tests that an interceptor can entirely prevent the handler from being + * invoked, based on the contents of the metadata. */ + before(done => { + server = new grpc.Server({ interceptors: [testAuthInterceptor] }); + server.addService(echoService.service, { + echo: ( + call: grpc.ServerUnaryCall, + callback: grpc.sendUnaryData + ) => { + // A test will fail if a request makes it to the handler without the correct auth header + assert.strictEqual( + call.metadata.get(AUTH_HEADER_KEY)?.[0], + AUTH_HEADER_ALLOWED_VALUE + ); + callback(null, call.request); + }, + }); + server.bindAsync( + 'localhost:0', + grpc.ServerCredentials.createInsecure(), + (error, port) => { + assert.ifError(error); + client = new TestClient(`localhost:${port}`, false); + done(); + } + ); + }); + after(done => { + client.close(); + server.tryShutdown(done); + }); + it('Should accept a request with the expected header', done => { + const requestMetadata = new grpc.Metadata(); + requestMetadata.set(AUTH_HEADER_KEY, AUTH_HEADER_ALLOWED_VALUE); + client.sendRequestWithMetadata(requestMetadata, done); + }); + it('Should reject a request without the expected header', done => { + const requestMetadata = new grpc.Metadata(); + requestMetadata.set(AUTH_HEADER_KEY, 'not allowed'); + client.sendRequestWithMetadata(requestMetadata, error => { + assert.strictEqual(error?.code, grpc.status.UNAUTHENTICATED); + done(); + }); + }); + }); + describe('Logging-type interceptor', () => { + let server: grpc.Server; + let client: TestClient; + before(done => { + server = new grpc.Server({ interceptors: [testLoggingInterceptor] }); + server.addService(echoService.service, { + echo: ( + call: grpc.ServerUnaryCall, + callback: grpc.sendUnaryData + ) => { + call.sendMetadata(new grpc.Metadata()); + callback(null, call.request); + }, + }); + server.bindAsync( + 'localhost:0', + grpc.ServerCredentials.createInsecure(), + (error, port) => { + assert.ifError(error); + client = new TestClient(`localhost:${port}`, false); + done(); + } + ); + }); + after(done => { + client.close(); + server.tryShutdown(done); + }); + beforeEach(() => { + resetEventCounts(); + }); + it('Should see every event once', done => { + 
client.sendRequest(error => { + assert.ifError(error); + assert.deepStrictEqual(eventCounts, { + receiveMetadata: 1, + receiveMessage: 1, + receiveHalfClose: 1, + sendMetadata: 1, + sendMessage: 1, + sendStatus: 1, + }); + done(); + }); + }); + }); + describe('Header injection interceptor', () => { + let server: grpc.Server; + let client: TestClient; + before(done => { + server = new grpc.Server({ + interceptors: [testHeaderInjectionInterceptor], + }); + server.addService(echoService.service, { + echo: ( + call: grpc.ServerUnaryCall, + callback: grpc.sendUnaryData + ) => { + assert.strictEqual( + call.metadata.get('injected-header')?.[0], + 'present' + ); + callback(null, call.request); + }, + }); + server.bindAsync( + 'localhost:0', + grpc.ServerCredentials.createInsecure(), + (error, port) => { + assert.ifError(error); + client = new TestClient(`localhost:${port}`, false); + done(); + } + ); + }); + after(done => { + client.close(); + server.tryShutdown(done); + }); + it('Should inject the header for the handler to see', done => { + client.sendRequest(done); + }); + }); + describe('Multiple interceptors', () => { + let server: grpc.Server; + let client: TestClient; + before(done => { + server = new grpc.Server({ + interceptors: [ + testAuthInterceptor, + testLoggingInterceptor, + testHeaderInjectionInterceptor, + ], + }); + server.addService(echoService.service, { + echo: ( + call: grpc.ServerUnaryCall, + callback: grpc.sendUnaryData + ) => { + assert.strictEqual( + call.metadata.get(AUTH_HEADER_KEY)?.[0], + AUTH_HEADER_ALLOWED_VALUE + ); + assert.strictEqual( + call.metadata.get('injected-header')?.[0], + 'present' + ); + call.sendMetadata(new grpc.Metadata()); + callback(null, call.request); + }, + }); + server.bindAsync( + 'localhost:0', + grpc.ServerCredentials.createInsecure(), + (error, port) => { + assert.ifError(error); + client = new TestClient(`localhost:${port}`, false); + done(); + } + ); + }); + after(done => { + client.close(); + server.tryShutdown(done); + }); + beforeEach(() => { + resetEventCounts(); + }); + it('Should not log requests rejected by auth', done => { + const requestMetadata = new grpc.Metadata(); + requestMetadata.set(AUTH_HEADER_KEY, 'not allowed'); + client.sendRequestWithMetadata(requestMetadata, error => { + assert.strictEqual(error?.code, grpc.status.UNAUTHENTICATED); + assert.deepStrictEqual(eventCounts, { + receiveMetadata: 0, + receiveMessage: 0, + receiveHalfClose: 0, + sendMetadata: 0, + sendMessage: 0, + sendStatus: 0, + }); + done(); + }); + }); + it('Should log requests accepted by auth', done => { + const requestMetadata = new grpc.Metadata(); + requestMetadata.set(AUTH_HEADER_KEY, AUTH_HEADER_ALLOWED_VALUE); + client.sendRequestWithMetadata(requestMetadata, error => { + assert.ifError(error); + assert.deepStrictEqual(eventCounts, { + receiveMetadata: 1, + receiveMessage: 1, + receiveHalfClose: 1, + sendMetadata: 1, + sendMessage: 1, + sendStatus: 1, + }); + done(); + }); + }); + }); +}); diff --git a/packages/grpc-js/test/test-server.ts b/packages/grpc-js/test/test-server.ts index 58b102883..dbcdad469 100644 --- a/packages/grpc-js/test/test-server.ts +++ b/packages/grpc-js/test/test-server.ts @@ -21,14 +21,41 @@ import * as assert from 'assert'; import * as fs from 'fs'; import * as http2 from 'http2'; import * as path from 'path'; +import * as protoLoader from '@grpc/proto-loader'; import * as grpc from '../src'; import { Server, ServerCredentials } from '../src'; import { ServiceError } from '../src/call'; import { ServiceClient, 
ServiceClientConstructor } from '../src/make-client'; -import { sendUnaryData, ServerUnaryCall } from '../src/server-call'; +import { + sendUnaryData, + ServerUnaryCall, + ServerDuplexStream, +} from '../src/server-call'; + +import { assert2, loadProtoFile } from './common'; +import { + TestServiceClient, + TestServiceHandlers, +} from './generated/TestService'; +import { ProtoGrpcType as TestServiceGrpcType } from './generated/test_service'; +import { Request__Output } from './generated/Request'; +import { CompressionAlgorithms } from '../src/compression-algorithms'; + +const loadedTestServiceProto = protoLoader.loadSync( + path.join(__dirname, 'fixtures/test_service.proto'), + { + keepCase: true, + longs: String, + enums: String, + defaults: true, + oneofs: true, + } +); -import { loadProtoFile } from './common'; +const testServiceGrpcObject = grpc.loadPackageDefinition( + loadedTestServiceProto +) as unknown as TestServiceGrpcType; const ca = fs.readFileSync(path.join(__dirname, 'fixtures', 'ca.pem')); const key = fs.readFileSync(path.join(__dirname, 'fixtures', 'server1.key')); @@ -36,6 +63,13 @@ const cert = fs.readFileSync(path.join(__dirname, 'fixtures', 'server1.pem')); function noop(): void {} describe('Server', () => { + let server: Server; + beforeEach(() => { + server = new Server(); + }); + afterEach(() => { + server.forceShutdown(); + }); describe('constructor', () => { it('should work with no arguments', () => { assert.doesNotThrow(() => { @@ -86,27 +120,6 @@ describe('Server', () => { }); }); - it('throws if bind is called after the server is started', done => { - const server = new Server(); - - server.bindAsync( - 'localhost:0', - ServerCredentials.createInsecure(), - (err, port) => { - assert.ifError(err); - server.start(); - assert.throws(() => { - server.bindAsync( - 'localhost:0', - ServerCredentials.createInsecure(), - noop - ); - }, /server is already started/); - server.tryShutdown(done); - } - ); - }); - it('throws on invalid inputs', () => { const server = new Server(); @@ -116,7 +129,15 @@ describe('Server', () => { assert.throws(() => { server.bindAsync('localhost:0', null as any, noop); - }, /creds must be an object/); + }, /creds must be a ServerCredentials object/); + + assert.throws(() => { + server.bindAsync( + 'localhost:0', + grpc.credentials.createInsecure() as any, + noop + ); + }, /creds must be a ServerCredentials object/); assert.throws(() => { server.bindAsync( @@ -126,6 +147,186 @@ describe('Server', () => { ); }, /callback must be a function/); }); + + it('succeeds when called with an already bound port', done => { + server.bindAsync( + 'localhost:0', + ServerCredentials.createInsecure(), + (err, port) => { + assert.ifError(err); + server.bindAsync( + `localhost:${port}`, + ServerCredentials.createInsecure(), + (err2, port2) => { + assert.ifError(err2); + assert.strictEqual(port, port2); + done(); + } + ); + } + ); + }); + + it('fails when called on a bound port with different credentials', done => { + const secureCreds = ServerCredentials.createSsl( + ca, + [{ private_key: key, cert_chain: cert }], + true + ); + server.bindAsync( + 'localhost:0', + ServerCredentials.createInsecure(), + (err, port) => { + assert.ifError(err); + server.bindAsync(`localhost:${port}`, secureCreds, (err2, port2) => { + assert(err2 !== null); + assert.match(err2.message, /credentials/); + done(); + }); + } + ); + }); + }); + + describe('unbind', () => { + let client: grpc.Client | null = null; + beforeEach(() => { + client = null; + }); + afterEach(() => { + 
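+      // If a test created a client, close it so that no open connection
+      // outlives the test.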
client?.close(); + }); + it('refuses to unbind port 0', done => { + assert.throws(() => { + server.unbind('localhost:0'); + }, /port 0/); + server.bindAsync( + 'localhost:0', + ServerCredentials.createInsecure(), + (err, port) => { + assert.ifError(err); + assert.notStrictEqual(port, 0); + assert.throws(() => { + server.unbind('localhost:0'); + }, /port 0/); + done(); + } + ); + }); + + it('successfully unbinds a bound ephemeral port', done => { + server.bindAsync( + 'localhost:0', + ServerCredentials.createInsecure(), + (err, port) => { + client = new grpc.Client( + `localhost:${port}`, + grpc.credentials.createInsecure() + ); + client.makeUnaryRequest( + '/math.Math/Div', + x => x, + x => x, + Buffer.from('abc'), + (callError1, result) => { + assert(callError1); + // UNIMPLEMENTED means that the request reached the call handling code + assert.strictEqual(callError1.code, grpc.status.UNIMPLEMENTED); + server.unbind(`localhost:${port}`); + const deadline = new Date(); + deadline.setSeconds(deadline.getSeconds() + 1); + client!.makeUnaryRequest( + '/math.Math/Div', + x => x, + x => x, + Buffer.from('abc'), + { deadline: deadline }, + (callError2, result) => { + assert(callError2); + // DEADLINE_EXCEEDED means that the server is unreachable + assert( + callError2.code === grpc.status.DEADLINE_EXCEEDED || + callError2.code === grpc.status.UNAVAILABLE + ); + done(); + } + ); + } + ); + } + ); + }); + + it('cancels a bindAsync in progress', done => { + server.bindAsync( + 'localhost:50051', + ServerCredentials.createInsecure(), + (err, port) => { + assert(err); + assert.match(err.message, /cancelled by unbind/); + done(); + } + ); + server.unbind('localhost:50051'); + }); + }); + + describe('drain', () => { + let client: ServiceClient; + let portNumber: number; + const protoFile = path.join(__dirname, 'fixtures', 'echo_service.proto'); + const echoService = loadProtoFile(protoFile) + .EchoService as ServiceClientConstructor; + + const serviceImplementation = { + echo(call: ServerUnaryCall, callback: sendUnaryData) { + callback(null, call.request); + }, + echoBidiStream(call: ServerDuplexStream) { + call.on('data', data => { + call.write(data); + }); + call.on('end', () => { + call.end(); + }); + }, + }; + + beforeEach(done => { + server.addService(echoService.service, serviceImplementation); + + server.bindAsync( + 'localhost:0', + ServerCredentials.createInsecure(), + (err, port) => { + assert.ifError(err); + portNumber = port; + client = new echoService( + `localhost:${port}`, + grpc.credentials.createInsecure() + ); + server.start(); + done(); + } + ); + }); + + afterEach(done => { + client.close(); + server.tryShutdown(done); + }); + + it('Should cancel open calls after the grace period ends', done => { + const call = client.echoBidiStream(); + call.on('error', (error: ServiceError) => { + assert.strictEqual(error.code, grpc.status.CANCELLED); + done(); + }); + call.on('data', () => { + server.drain(`localhost:${portNumber!}`, 100); + }); + call.write({ value: 'abc' }); + }); }); describe('start', () => { @@ -267,7 +468,7 @@ describe('Server', () => { } }; - methodsToVerify.forEach((method) => { + methodsToVerify.forEach(method => { const call = client[method]({}, assertFailsWithUnimplementedError); // for unary call.on('error', assertFailsWithUnimplementedError); // for streamed }); @@ -275,14 +476,12 @@ describe('Server', () => { it('fails for non-object service definition argument', () => { assert.throws(() => { - server.removeService('upsie' as any) - }, /removeService.*requires object 
as argument/ - ); + server.removeService('upsie' as any); + }, /removeService.*requires object as argument/); }); }); describe('unregister', () => { - let server: Server; let client: ServiceClient; @@ -294,7 +493,7 @@ describe('Server', () => { server = new Server(); server.addService(mathServiceAttrs, { div(call: ServerUnaryCall, callback: sendUnaryData) { - callback(null, {quotient: '42'}); + callback(null, { quotient: '42' }); }, }); server.bindAsync( @@ -319,7 +518,11 @@ describe('Server', () => { it('removes handler by name and returns true', done => { const name = mathServiceAttrs['Div'].path; - assert.strictEqual(server.unregister(name), true, 'Server#unregister should return true on success'); + assert.strictEqual( + server.unregister(name), + true, + 'Server#unregister should return true on success' + ); client.div( { divisor: 4, dividend: 3 }, @@ -332,7 +535,11 @@ describe('Server', () => { }); it('returns false for unknown handler', () => { - assert.strictEqual(server.unregister('noOneHere'), false, 'Server#unregister should return false on failure'); + assert.strictEqual( + server.unregister('noOneHere'), + false, + 'Server#unregister should return false on failure' + ); }); }); @@ -389,6 +596,7 @@ describe('Server', () => { (error: ServiceError, response: any) => { assert(error); assert.strictEqual(error.code, grpc.status.UNIMPLEMENTED); + assert.match(error.details, /does not implement the method.*Div/); done(); } ); @@ -398,6 +606,7 @@ describe('Server', () => { const call = client.sum((error: ServiceError, response: any) => { assert(error); assert.strictEqual(error.code, grpc.status.UNIMPLEMENTED); + assert.match(error.details, /does not implement the method.*Sum/); done(); }); @@ -414,6 +623,7 @@ describe('Server', () => { call.on('error', (err: ServiceError) => { assert(err); assert.strictEqual(err.code, grpc.status.UNIMPLEMENTED); + assert.match(err.details, /does not implement the method.*Fib/); done(); }); }); @@ -428,6 +638,93 @@ describe('Server', () => { call.on('error', (err: ServiceError) => { assert(err); assert.strictEqual(err.code, grpc.status.UNIMPLEMENTED); + assert.match(err.details, /does not implement the method.*DivMany/); + done(); + }); + + call.end(); + }); + }); + + describe('Unregistered service', () => { + let server: Server; + let client: ServiceClient; + + const mathProtoFile = path.join(__dirname, 'fixtures', 'math.proto'); + const mathClient = (loadProtoFile(mathProtoFile).math as any).Math; + + before(done => { + server = new Server(); + // Don't register a service at all + server.bindAsync( + 'localhost:0', + ServerCredentials.createInsecure(), + (err, port) => { + assert.ifError(err); + client = new mathClient( + `localhost:${port}`, + grpc.credentials.createInsecure() + ); + server.start(); + done(); + } + ); + }); + + after(done => { + client.close(); + server.tryShutdown(done); + }); + + it('should respond to a unary call with UNIMPLEMENTED', done => { + client.div( + { divisor: 4, dividend: 3 }, + (error: ServiceError, response: any) => { + assert(error); + assert.strictEqual(error.code, grpc.status.UNIMPLEMENTED); + assert.match(error.details, /does not implement the method.*Div/); + done(); + } + ); + }); + + it('should respond to a client stream with UNIMPLEMENTED', done => { + const call = client.sum((error: ServiceError, response: any) => { + assert(error); + assert.strictEqual(error.code, grpc.status.UNIMPLEMENTED); + assert.match(error.details, /does not implement the method.*Sum/); + done(); + }); + + call.end(); + }); + + 
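+    /* The server streaming and bidi streaming cases below verify the same
+     * behavior: the call fails with UNIMPLEMENTED and the status details
+     * name the method that was not implemented. */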
it('should respond to a server stream with UNIMPLEMENTED', done => { + const call = client.fib({ limit: 5 }); + + call.on('data', (value: any) => { + assert.fail('No messages expected'); + }); + + call.on('error', (err: ServiceError) => { + assert(err); + assert.strictEqual(err.code, grpc.status.UNIMPLEMENTED); + assert.match(err.details, /does not implement the method.*Fib/); + done(); + }); + }); + + it('should respond to a bidi call with UNIMPLEMENTED', done => { + const call = client.divMany(); + + call.on('data', (value: any) => { + assert.fail('No messages expected'); + }); + + call.on('error', (err: ServiceError) => { + assert(err); + assert.strictEqual(err.code, grpc.status.UNIMPLEMENTED); + assert.match(err.details, /does not implement the method.*DivMany/); done(); }); @@ -439,18 +736,27 @@ describe('Server', () => { describe('Echo service', () => { let server: Server; let client: ServiceClient; + const protoFile = path.join(__dirname, 'fixtures', 'echo_service.proto'); + const echoService = loadProtoFile(protoFile) + .EchoService as ServiceClientConstructor; - before(done => { - const protoFile = path.join(__dirname, 'fixtures', 'echo_service.proto'); - const echoService = loadProtoFile(protoFile) - .EchoService as ServiceClientConstructor; + const serviceImplementation = { + echo(call: ServerUnaryCall, callback: sendUnaryData) { + callback(null, call.request); + }, + echoBidiStream(call: ServerDuplexStream) { + call.on('data', data => { + call.write(data); + }); + call.on('end', () => { + call.end(); + }); + }, + }; + before(done => { server = new Server(); - server.addService(echoService.service, { - echo(call: ServerUnaryCall, callback: sendUnaryData) { - callback(null, call.request); - }, - }); + server.addService(echoService.service, serviceImplementation); server.bindAsync( 'localhost:0', @@ -482,6 +788,53 @@ describe('Echo service', () => { } ); }); + + /* This test passes on Node 18 but fails on Node 16. 
The failure appears to + * be caused by https://github.com/nodejs/node/issues/42713 */ + it.skip('should continue a stream after server shutdown', done => { + const server2 = new Server(); + server2.addService(echoService.service, serviceImplementation); + server2.bindAsync( + 'localhost:0', + ServerCredentials.createInsecure(), + (err, port) => { + if (err) { + done(err); + return; + } + const client2 = new echoService( + `localhost:${port}`, + grpc.credentials.createInsecure() + ); + server2.start(); + const stream = client2.echoBidiStream(); + const totalMessages = 5; + let messagesSent = 0; + stream.write({ value: 'test value', value2: messagesSent }); + messagesSent += 1; + stream.on('data', () => { + if (messagesSent === 1) { + server2.tryShutdown(assert2.mustCall(() => {})); + } + if (messagesSent >= totalMessages) { + stream.end(); + } else { + stream.write({ value: 'test value', value2: messagesSent }); + messagesSent += 1; + } + }); + stream.on( + 'status', + assert2.mustCall((status: grpc.StatusObject) => { + assert.strictEqual(status.code, grpc.status.OK); + assert.strictEqual(messagesSent, totalMessages); + }) + ); + stream.on('error', () => {}); + assert2.afterMustCallsSatisfied(done); + } + ); + }); }); describe('Generic client and server', () => { @@ -600,3 +953,280 @@ describe('Generic client and server', () => { }); }); }); + +describe('Compressed requests', () => { + const testServiceHandlers: TestServiceHandlers = { + Unary(call, callback) { + callback(null, { count: 500000, message: call.request.message }); + }, + + ClientStream(call, callback) { + let timesCalled = 0; + + call.on('data', () => { + timesCalled += 1; + }); + + call.on('end', () => { + callback(null, { count: timesCalled }); + }); + }, + + ServerStream(call) { + const { request } = call; + + for (let i = 0; i < 5; i++) { + call.write({ count: request.message.length }); + } + + call.end(); + }, + + BidiStream(call) { + call.on('data', (data: Request__Output) => { + call.write({ count: data.message.length }); + }); + + call.on('end', () => { + call.end(); + }); + }, + }; + + describe('Test service client and server with deflate', () => { + let client: TestServiceClient; + let server: Server; + let assignedPort: number; + + before(done => { + server = new Server(); + server.addService( + testServiceGrpcObject.TestService.service, + testServiceHandlers + ); + server.bindAsync( + 'localhost:0', + ServerCredentials.createInsecure(), + (err, port) => { + assert.ifError(err); + server.start(); + assignedPort = port; + client = new testServiceGrpcObject.TestService( + `localhost:${assignedPort}`, + grpc.credentials.createInsecure(), + { + 'grpc.default_compression_algorithm': + CompressionAlgorithms.deflate, + } + ); + done(); + } + ); + }); + + after(done => { + client.close(); + server.tryShutdown(done); + }); + + it('Should compress and decompress when performing unary call', done => { + client.unary({ message: 'foo' }, (err, response) => { + assert.ifError(err); + done(); + }); + }); + + it('Should compress and decompress when performing client stream', done => { + const clientStream = client.clientStream((err, res) => { + assert.ifError(err); + assert.equal(res?.count, 3); + done(); + }); + + clientStream.write({ message: 'foo' }, () => { + clientStream.write({ message: 'bar' }, () => { + clientStream.write({ message: 'baz' }, () => { + setTimeout(() => clientStream.end(), 10); + }); + }); + }); + }); + + it('Should compress and decompress when performing server stream', done => { + const serverStream = 
client.serverStream({ message: 'foobar' }); + let timesResponded = 0; + + serverStream.on('data', () => { + timesResponded += 1; + }); + + serverStream.on('error', err => { + assert.ifError(err); + done(); + }); + + serverStream.on('end', () => { + assert.equal(timesResponded, 5); + done(); + }); + }); + + it('Should compress and decompress when performing bidi stream', done => { + const bidiStream = client.bidiStream(); + let timesRequested = 0; + let timesResponded = 0; + + bidiStream.on('data', () => { + timesResponded += 1; + }); + + bidiStream.on('error', err => { + assert.ifError(err); + done(); + }); + + bidiStream.on('end', () => { + assert.equal(timesResponded, timesRequested); + done(); + }); + + bidiStream.write({ message: 'foo' }, () => { + timesRequested += 1; + bidiStream.write({ message: 'bar' }, () => { + timesRequested += 1; + bidiStream.write({ message: 'baz' }, () => { + timesRequested += 1; + setTimeout(() => bidiStream.end(), 10); + }); + }); + }); + }); + + it('Should compress and decompress with gzip', done => { + client = new testServiceGrpcObject.TestService( + `localhost:${assignedPort}`, + grpc.credentials.createInsecure(), + { + 'grpc.default_compression_algorithm': CompressionAlgorithms.gzip, + } + ); + + client.unary({ message: 'foo' }, (err, response) => { + assert.ifError(err); + done(); + }); + }); + + it('Should compress and decompress when performing client stream', done => { + const clientStream = client.clientStream((err, res) => { + assert.ifError(err); + assert.equal(res?.count, 3); + done(); + }); + + clientStream.write({ message: 'foo' }, () => { + clientStream.write({ message: 'bar' }, () => { + clientStream.write({ message: 'baz' }, () => { + setTimeout(() => clientStream.end(), 10); + }); + }); + }); + }); + + it('Should compress and decompress when performing server stream', done => { + const serverStream = client.serverStream({ message: 'foobar' }); + let timesResponded = 0; + + serverStream.on('data', () => { + timesResponded += 1; + }); + + serverStream.on('error', err => { + assert.ifError(err); + done(); + }); + + serverStream.on('end', () => { + assert.equal(timesResponded, 5); + done(); + }); + }); + + it('Should compress and decompress when performing bidi stream', done => { + const bidiStream = client.bidiStream(); + let timesRequested = 0; + let timesResponded = 0; + + bidiStream.on('data', () => { + timesResponded += 1; + }); + + bidiStream.on('error', err => { + assert.ifError(err); + done(); + }); + + bidiStream.on('end', () => { + assert.equal(timesResponded, timesRequested); + done(); + }); + + bidiStream.write({ message: 'foo' }, () => { + timesRequested += 1; + bidiStream.write({ message: 'bar' }, () => { + timesRequested += 1; + bidiStream.write({ message: 'baz' }, () => { + timesRequested += 1; + setTimeout(() => bidiStream.end(), 10); + }); + }); + }); + }); + + it('Should handle large messages', done => { + let longMessage = ''; + for (let i = 0; i < 400000; i++) { + const letter = 'abcdefghijklmnopqrstuvwxyz'[ + Math.floor(Math.random() * 26) + ]; + longMessage = longMessage + letter.repeat(10); + } + + client.unary({ message: longMessage }, (err, response) => { + assert.ifError(err); + assert.strictEqual(response?.message, longMessage); + done(); + }); + }); + + /* As of Node 16, Writable and Duplex streams validate the encoding + * argument to write, and the flags values we are passing there are not + * valid. We don't currently have an alternative way to pass that flag + * down, so for now this feature is not supported. 
*/ + it.skip('Should not compress requests when the NoCompress write flag is used', done => { + const bidiStream = client.bidiStream(); + let timesRequested = 0; + let timesResponded = 0; + + bidiStream.on('data', () => { + timesResponded += 1; + }); + + bidiStream.on('error', err => { + assert.ifError(err); + done(); + }); + + bidiStream.on('end', () => { + assert.equal(timesResponded, timesRequested); + done(); + }); + + bidiStream.write({ message: 'foo' }, '2', (err: any) => { + assert.ifError(err); + timesRequested += 1; + setTimeout(() => bidiStream.end(), 10); + }); + }); + }); +}); diff --git a/packages/grpc-js/test/test-uri-parser.ts b/packages/grpc-js/test/test-uri-parser.ts index d04cae539..1e20e5e26 100644 --- a/packages/grpc-js/test/test-uri-parser.ts +++ b/packages/grpc-js/test/test-uri-parser.ts @@ -19,59 +19,129 @@ import * as assert from 'assert'; import * as uriParser from '../src/uri-parser'; import * as resolver from '../src/resolver'; -describe('URI Parser', function(){ - describe('parseUri', function() { - const expectationList: {target: string, result: uriParser.GrpcUri | null}[] = [ - {target: 'localhost', result: {scheme: undefined, authority: undefined, path: 'localhost'}}, +describe('URI Parser', function () { + describe('parseUri', function () { + const expectationList: { + target: string; + result: uriParser.GrpcUri | null; + }[] = [ + { + target: 'localhost', + result: { scheme: undefined, authority: undefined, path: 'localhost' }, + }, /* This looks weird, but it's OK because the resolver selection code will handle it */ - {target: 'localhost:80', result: {scheme: 'localhost', authority: undefined, path: '80'}}, - {target: 'dns:localhost', result: {scheme: 'dns', authority: undefined, path: 'localhost'}}, - {target: 'dns:///localhost', result: {scheme: 'dns', authority: '', path: 'localhost'}}, - {target: 'dns://authority/localhost', result: {scheme: 'dns', authority: 'authority', path: 'localhost'}}, - {target: '//authority/localhost', result: {scheme: undefined, authority: 'authority', path: 'localhost'}}, + { + target: 'localhost:80', + result: { scheme: 'localhost', authority: undefined, path: '80' }, + }, + { + target: 'dns:localhost', + result: { scheme: 'dns', authority: undefined, path: 'localhost' }, + }, + { + target: 'dns:///localhost', + result: { scheme: 'dns', authority: '', path: 'localhost' }, + }, + { + target: 'dns://authority/localhost', + result: { scheme: 'dns', authority: 'authority', path: 'localhost' }, + }, + { + target: '//authority/localhost', + result: { + scheme: undefined, + authority: 'authority', + path: 'localhost', + }, + }, // Regression test for https://github.com/grpc/grpc-node/issues/1359 - {target: 'dns:foo-internal.aws-us-east-2.tracing.staging-edge.foo-data.net:443:443', result: {scheme: 'dns', authority: undefined, path: 'foo-internal.aws-us-east-2.tracing.staging-edge.foo-data.net:443:443'}} + { + target: + 'dns:foo-internal.aws-us-east-2.tracing.staging-edge.foo-data.net:443:443', + result: { + scheme: 'dns', + authority: undefined, + path: 'foo-internal.aws-us-east-2.tracing.staging-edge.foo-data.net:443:443', + }, + }, ]; - for (const {target, result} of expectationList) { - it (target, function() { + for (const { target, result } of expectationList) { + it(target, function () { assert.deepStrictEqual(uriParser.parseUri(target), result); }); } }); - describe('parseUri + mapUriDefaultScheme', function() { - const expectationList: {target: string, result: uriParser.GrpcUri | null}[] = [ - {target: 'localhost', 
result: {scheme: 'dns', authority: undefined, path: 'localhost'}}, - {target: 'localhost:80', result: {scheme: 'dns', authority: undefined, path: 'localhost:80'}}, - {target: 'dns:localhost', result: {scheme: 'dns', authority: undefined, path: 'localhost'}}, - {target: 'dns:///localhost', result: {scheme: 'dns', authority: '', path: 'localhost'}}, - {target: 'dns://authority/localhost', result: {scheme: 'dns', authority: 'authority', path: 'localhost'}}, - {target: 'unix:socket', result: {scheme: 'unix', authority: undefined, path: 'socket'}}, - {target: 'bad:path', result: {scheme: 'dns', authority: undefined, path: 'bad:path'}} + describe('parseUri + mapUriDefaultScheme', function () { + const expectationList: { + target: string; + result: uriParser.GrpcUri | null; + }[] = [ + { + target: 'localhost', + result: { scheme: 'dns', authority: undefined, path: 'localhost' }, + }, + { + target: 'localhost:80', + result: { scheme: 'dns', authority: undefined, path: 'localhost:80' }, + }, + { + target: 'dns:localhost', + result: { scheme: 'dns', authority: undefined, path: 'localhost' }, + }, + { + target: 'dns:///localhost', + result: { scheme: 'dns', authority: '', path: 'localhost' }, + }, + { + target: 'dns://authority/localhost', + result: { scheme: 'dns', authority: 'authority', path: 'localhost' }, + }, + { + target: 'unix:socket', + result: { scheme: 'unix', authority: undefined, path: 'socket' }, + }, + { + target: 'bad:path', + result: { scheme: 'dns', authority: undefined, path: 'bad:path' }, + }, ]; - for (const {target, result} of expectationList) { - it(target, function() { - assert.deepStrictEqual(resolver.mapUriDefaultScheme(uriParser.parseUri(target) ?? {path: 'null'}), result); - }) + for (const { target, result } of expectationList) { + it(target, function () { + assert.deepStrictEqual( + resolver.mapUriDefaultScheme( + uriParser.parseUri(target) ?? 
{ path: 'null' } + ), + result + ); + }); } }); - - describe('splitHostPort', function() { - const expectationList: {path: string, result: uriParser.HostPort | null}[] = [ - {path: 'localhost', result: {host: 'localhost'}}, - {path: 'localhost:123', result: {host: 'localhost', port: 123}}, - {path: '12345:6789', result: {host: '12345', port: 6789}}, - {path: '[::1]:123', result: {host: '::1', port: 123}}, - {path: '[::1]', result: {host: '::1'}}, - {path: '[', result: null}, - {path: '[123]', result: null}, + + describe('splitHostPort', function () { + const expectationList: { + path: string; + result: uriParser.HostPort | null; + }[] = [ + { path: 'localhost', result: { host: 'localhost' } }, + { path: 'localhost:123', result: { host: 'localhost', port: 123 } }, + { path: '12345:6789', result: { host: '12345', port: 6789 } }, + { path: '[::1]:123', result: { host: '::1', port: 123 } }, + { path: '[::1]', result: { host: '::1' } }, + { path: '[', result: null }, + { path: '[123]', result: null }, // Regression test for https://github.com/grpc/grpc-node/issues/1359 - {path: 'foo-internal.aws-us-east-2.tracing.staging-edge.foo-data.net:443:443', result: {host: 'foo-internal.aws-us-east-2.tracing.staging-edge.foo-data.net:443:443'}} + { + path: 'foo-internal.aws-us-east-2.tracing.staging-edge.foo-data.net:443:443', + result: { + host: 'foo-internal.aws-us-east-2.tracing.staging-edge.foo-data.net:443:443', + }, + }, ]; - for (const {path, result} of expectationList) { - it(path, function() { + for (const { path, result } of expectationList) { + it(path, function () { assert.deepStrictEqual(uriParser.splitHostPort(path), result); }); } }); -}); \ No newline at end of file +}); diff --git a/packages/grpc-js/tsconfig.json b/packages/grpc-js/tsconfig.json index ba675db78..763ceda98 100644 --- a/packages/grpc-js/tsconfig.json +++ b/packages/grpc-js/tsconfig.json @@ -1,15 +1,29 @@ { - "extends": "./node_modules/gts/tsconfig-google.json", "compilerOptions": { + "allowUnreachableCode": false, + "allowUnusedLabels": false, + "declaration": true, + "forceConsistentCasingInFileNames": true, + "noEmitOnError": true, + "noFallthroughCasesInSwitch": true, + "noImplicitReturns": true, + "pretty": true, + "sourceMap": true, + "strict": true, "lib": ["es2017"], "outDir": "build", "target": "es2017", "module": "commonjs", "resolveJsonModule": true, - "incremental": true + "incremental": true, + "types": ["mocha"], + "noUnusedLocals": true }, "include": [ "src/**/*.ts", "test/**/*.ts" + ], + "exclude": [ + "node_modules" ] } diff --git a/packages/grpc-reflection/LICENSE b/packages/grpc-reflection/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/packages/grpc-reflection/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
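The `@grpc/reflection` package introduced in the following files exposes the gRPC Server Reflection Protocol as a service that can be attached to an existing `@grpc/grpc-js` server. The README below shows the core calls; as a minimal end-to-end sketch, assuming a hypothetical `echo_service.proto` and an arbitrary port (neither is part of this package), a server with reflection enabled might be wired up like this:

```typescript
import * as grpc from '@grpc/grpc-js';
import * as protoLoader from '@grpc/proto-loader';
import { ReflectionService } from '@grpc/reflection';

// 'echo_service.proto' is a placeholder; load the package your server implements.
const packageDefinition = protoLoader.loadSync('echo_service.proto');

const server = new grpc.Server();
// ... register the service implementations for this package here ...

// Advertise the loaded package definition over the reflection API.
const reflection = new ReflectionService(packageDefinition);
reflection.addToServer(server);

server.bindAsync(
  'localhost:50051',
  grpc.ServerCredentials.createInsecure(),
  (error, port) => {
    if (error) {
      throw error;
    }
    console.log(`Server with reflection enabled listening on port ${port}`);
  }
);
```

With this in place, a reflection-aware client such as Postman can list and describe the loaded services at runtime without needing the proto files, which is the workflow the README describes.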
diff --git a/packages/grpc-reflection/README.md b/packages/grpc-reflection/README.md new file mode 100644 index 000000000..c4aa03b63 --- /dev/null +++ b/packages/grpc-reflection/README.md @@ -0,0 +1,42 @@ +# gRPC Reflection + +gRPC reflection API service for use with gRPC-node. + +## Background + +This package provides an implementation of the [gRPC Server Reflection Protocol](https://github.com/grpc/grpc/blob/master/doc/server-reflection.md) service which can be added to an existing gRPC server. Adding this service to your server will allow clients [such as postman](https://blog.postman.com/postman-now-supports-grpc/) to dynamically load the API specification from your running application rather than needing to pass around and load proto files manually. + +![example of reflection working with postman](./images/example.gif) + +## Installation + +Use the package manager [npm](https://www.npmjs.com/get-npm) to install `@grpc/reflection`. + +```bash +npm install @grpc/reflection +``` + +## Usage + +Any gRPC-node server can use `@grpc/reflection` to expose reflection information about their gRPC API. + +```typescript +import { ReflectionService } from '@grpc/reflection'; + +const pkg = protoLoader.load(...); // Load your gRPC package definition as normal + +// Create the reflection implementation based on your gRPC package and add it to your existing server +const reflection = new ReflectionService(pkg); +reflection.addToServer(server); +``` + +Congrats! Your server now allows any client to request reflection information about its API. + +## Contributing + +Pull requests are welcome. For major changes, please open an issue first to discuss what you would like to change. The original proposal for this library can be found in [gRFC L108](https://github.com/grpc/proposal/blob/master/L108-node-grpc-reflection-library.md) + +Please make sure to update tests as appropriate. + +## License +[Apache License 2.0](https://choosealicense.com/licenses/apache-2.0/) diff --git a/packages/grpc-reflection/gulpfile.ts b/packages/grpc-reflection/gulpfile.ts new file mode 100644 index 000000000..f95b91bc0 --- /dev/null +++ b/packages/grpc-reflection/gulpfile.ts @@ -0,0 +1,50 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import * as gulp from 'gulp'; +import * as mocha from 'gulp-mocha'; +import * as execa from 'execa'; +import * as path from 'path'; + +const reflectionDir = __dirname; +const outDir = path.resolve(reflectionDir, 'build'); + +const execNpmVerb = (verb: string, ...args: string[]) => + execa('npm', [verb, ...args], {cwd: reflectionDir, stdio: 'inherit'}); +const execNpmCommand = execNpmVerb.bind(null, 'run'); + +const install = () => execNpmVerb('install', '--unsafe-perm'); + +/** + * Transpiles TypeScript files in src/ to JavaScript according to the settings + * found in tsconfig.json. 
+ */ +const compile = () => execNpmCommand('compile'); + +const runTests = () => { + return gulp.src(`${outDir}/test/**/*.js`) + .pipe(mocha({reporter: 'mocha-jenkins-reporter', + require: ['ts-node/register']})); +}; + +const test = gulp.series(install, runTests); + +export { + install, + compile, + test +} diff --git a/packages/grpc-reflection/images/example.gif b/packages/grpc-reflection/images/example.gif new file mode 100644 index 000000000..65c92d78f Binary files /dev/null and b/packages/grpc-reflection/images/example.gif differ diff --git a/packages/grpc-reflection/package.json b/packages/grpc-reflection/package.json new file mode 100644 index 000000000..8f0d8c934 --- /dev/null +++ b/packages/grpc-reflection/package.json @@ -0,0 +1,45 @@ +{ + "name": "@grpc/reflection", + "version": "1.0.1", + "author": { + "name": "Google Inc." + }, + "description": "Reflection API service for use with gRPC-node", + "repository": { + "type": "git", + "url": "https://github.com/grpc/grpc-node.git", + "directory": "packages/grpc-reflection" + }, + "bugs": "https://github.com/grpc/grpc-node/issues", + "contributors": [ + { + "name": "Justin Timmons", + "email": "justinmtimmons@gmail.com" + } + ], + "main": "build/src/index.js", + "types": "build/src/index.d.ts", + "files": [ + "build" + ], + "license": "Apache-2.0", + "scripts": { + "compile": "tsc -p .", + "postcompile": "copyfiles './proto/**/*.proto' build/", + "prepare": "npm run generate-types && npm run compile", + "test": "mocha --require ts-node/register test/**.ts", + "generate-types": "proto-loader-gen-types --longs String --enums String --bytes Array --defaults --oneofs --includeComments --includeDirs proto/ -O src/generated grpc/reflection/v1/reflection.proto grpc/reflection/v1alpha/reflection.proto" + }, + "dependencies": { + "@grpc/proto-loader": "^0.7.10", + "protobufjs": "^7.2.5" + }, + "peerDependencies": { + "@grpc/grpc-js": "^1.8.21" + }, + "devDependencies": { + "@grpc/grpc-js": "file:../grpc-js", + "copyfiles": "^2.4.1", + "typescript": "^5.2.2" + } +} diff --git a/packages/grpc-reflection/proto/grpc/reflection/v1/reflection.proto b/packages/grpc-reflection/proto/grpc/reflection/v1/reflection.proto new file mode 100644 index 000000000..1c106af7f --- /dev/null +++ b/packages/grpc-reflection/proto/grpc/reflection/v1/reflection.proto @@ -0,0 +1,149 @@ +// Taken from spec https://raw.githubusercontent.com/grpc/grpc/master/src/proto/grpc/reflection/v1/reflection.proto +// Additional versions can be found here: https://github.com/grpc/grpc/tree/master/src/proto/grpc/reflection + +// Copyright 2016 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Service exported by server reflection. 
A more complete description of how +// server reflection works can be found at +// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + +syntax = "proto3"; + +package grpc.reflection.v1; + +option go_package = "google.golang.org/grpc/reflection/grpc_reflection_v1"; +option java_multiple_files = true; +option java_package = "io.grpc.reflection.v1"; +option java_outer_classname = "ServerReflectionProto"; + +service ServerReflection { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + rpc ServerReflectionInfo(stream ServerReflectionRequest) + returns (stream ServerReflectionResponse); +} + +// The message sent by the client when calling ServerReflectionInfo method. +message ServerReflectionRequest { + string host = 1; + // To use reflection service, the client should set one of the following + // fields in message_request. The server distinguishes requests by their + // defined field and then handles them using corresponding methods. + oneof message_request { + // Find a proto file by the file name. + string file_by_filename = 3; + + // Find the proto file that declares the given fully-qualified symbol name. + // This field should be a fully-qualified symbol name + // (e.g. .[.] or .). + string file_containing_symbol = 4; + + // Find the proto file which defines an extension extending the given + // message type with the given field number. + ExtensionRequest file_containing_extension = 5; + + // Finds the tag numbers used by all known extensions of the given message + // type, and appends them to ExtensionNumberResponse in an undefined order. + // Its corresponding method is best-effort: it's not guaranteed that the + // reflection service will implement this method, and it's not guaranteed + // that this method will provide all extensions. Returns + // StatusCode::UNIMPLEMENTED if it's not implemented. + // This field should be a fully-qualified type name. The format is + // . + string all_extension_numbers_of_type = 6; + + // List the full names of registered services. The content will not be + // checked. + string list_services = 7; + } +} + +// The type name and extension number sent by the client when requesting +// file_containing_extension. +message ExtensionRequest { + // Fully-qualified type name. The format should be . + string containing_type = 1; + int32 extension_number = 2; +} + +// The message sent by the server to answer ServerReflectionInfo method. +message ServerReflectionResponse { + string valid_host = 1; + ServerReflectionRequest original_request = 2; + // The server sets one of the following fields according to the message_request + // in the request. + oneof message_response { + // This message is used to answer file_by_filename, file_containing_symbol, + // file_containing_extension requests with transitive dependencies. + // As the repeated label is not allowed in oneof fields, we use a + // FileDescriptorResponse message to encapsulate the repeated fields. + // The reflection service is allowed to avoid sending FileDescriptorProtos + // that were previously sent in response to earlier requests in the stream. + FileDescriptorResponse file_descriptor_response = 4; + + // This message is used to answer all_extension_numbers_of_type requests. 
+ ExtensionNumberResponse all_extension_numbers_response = 5; + + // This message is used to answer list_services requests. + ListServiceResponse list_services_response = 6; + + // This message is used when an error occurs. + ErrorResponse error_response = 7; + } +} + +// Serialized FileDescriptorProto messages sent by the server answering +// a file_by_filename, file_containing_symbol, or file_containing_extension +// request. +message FileDescriptorResponse { + // Serialized FileDescriptorProto messages. We avoid taking a dependency on + // descriptor.proto, which uses proto2 only features, by making them opaque + // bytes instead. + repeated bytes file_descriptor_proto = 1; +} + +// A list of extension numbers sent by the server answering +// all_extension_numbers_of_type request. +message ExtensionNumberResponse { + // Full name of the base type, including the package name. The format + // is . + string base_type_name = 1; + repeated int32 extension_number = 2; +} + +// A list of ServiceResponse sent by the server answering list_services request. +message ListServiceResponse { + // The information of each service may be expanded in the future, so we use + // ServiceResponse message to encapsulate it. + repeated ServiceResponse service = 1; +} + +// The information of a single service used by ListServiceResponse to answer +// list_services request. +message ServiceResponse { + // Full name of a registered service, including its package name. The format + // is . + string name = 1; +} + +// The error code and error message sent by the server when an error occurs. +message ErrorResponse { + // This field uses the error codes defined in grpc::StatusCode. + int32 error_code = 1; + string error_message = 2; +} diff --git a/packages/grpc-reflection/proto/grpc/reflection/v1alpha/reflection.proto b/packages/grpc-reflection/proto/grpc/reflection/v1alpha/reflection.proto new file mode 100644 index 000000000..781659af2 --- /dev/null +++ b/packages/grpc-reflection/proto/grpc/reflection/v1alpha/reflection.proto @@ -0,0 +1,139 @@ +// Taken from spec https://raw.githubusercontent.com/grpc/grpc/master/src/proto/grpc/reflection/v1alpha/reflection.proto +// Additional versions can be found here: https://github.com/grpc/grpc/tree/master/src/proto/grpc/reflection + +// Copyright 2016 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Service exported by server reflection + +syntax = "proto3"; + +package grpc.reflection.v1alpha; + +service ServerReflection { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + rpc ServerReflectionInfo(stream ServerReflectionRequest) + returns (stream ServerReflectionResponse); +} + +// The message sent by the client when calling ServerReflectionInfo method. +message ServerReflectionRequest { + string host = 1; + // To use reflection service, the client should set one of the following + // fields in message_request. 
The server distinguishes requests by their + // defined field and then handles them using corresponding methods. + oneof message_request { + // Find a proto file by the file name. + string file_by_filename = 3; + + // Find the proto file that declares the given fully-qualified symbol name. + // This field should be a fully-qualified symbol name + // (e.g. .[.] or .). + string file_containing_symbol = 4; + + // Find the proto file which defines an extension extending the given + // message type with the given field number. + ExtensionRequest file_containing_extension = 5; + + // Finds the tag numbers used by all known extensions of the given message + // type, and appends them to ExtensionNumberResponse in an undefined order. + // Its corresponding method is best-effort: it's not guaranteed that the + // reflection service will implement this method, and it's not guaranteed + // that this method will provide all extensions. Returns + // StatusCode::UNIMPLEMENTED if it's not implemented. + // This field should be a fully-qualified type name. The format is + // . + string all_extension_numbers_of_type = 6; + + // List the full names of registered services. The content will not be + // checked. + string list_services = 7; + } +} + +// The type name and extension number sent by the client when requesting +// file_containing_extension. +message ExtensionRequest { + // Fully-qualified type name. The format should be . + string containing_type = 1; + int32 extension_number = 2; +} + +// The message sent by the server to answer ServerReflectionInfo method. +message ServerReflectionResponse { + string valid_host = 1; + ServerReflectionRequest original_request = 2; + // The server set one of the following fields accroding to the message_request + // in the request. + oneof message_response { + // This message is used to answer file_by_filename, file_containing_symbol, + // file_containing_extension requests with transitive dependencies. As + // the repeated label is not allowed in oneof fields, we use a + // FileDescriptorResponse message to encapsulate the repeated fields. + // The reflection service is allowed to avoid sending FileDescriptorProtos + // that were previously sent in response to earlier requests in the stream. + FileDescriptorResponse file_descriptor_response = 4; + + // This message is used to answer all_extension_numbers_of_type requst. + ExtensionNumberResponse all_extension_numbers_response = 5; + + // This message is used to answer list_services request. + ListServiceResponse list_services_response = 6; + + // This message is used when an error occurs. + ErrorResponse error_response = 7; + } +} + +// Serialized FileDescriptorProto messages sent by the server answering +// a file_by_filename, file_containing_symbol, or file_containing_extension +// request. +message FileDescriptorResponse { + // Serialized FileDescriptorProto messages. We avoid taking a dependency on + // descriptor.proto, which uses proto2 only features, by making them opaque + // bytes instead. + repeated bytes file_descriptor_proto = 1; +} + +// A list of extension numbers sent by the server answering +// all_extension_numbers_of_type request. +message ExtensionNumberResponse { + // Full name of the base type, including the package name. The format + // is . + string base_type_name = 1; + repeated int32 extension_number = 2; +} + +// A list of ServiceResponse sent by the server answering list_services request. 
+message ListServiceResponse { + // The information of each service may be expanded in the future, so we use + // ServiceResponse message to encapsulate it. + repeated ServiceResponse service = 1; +} + +// The information of a single service used by ListServiceResponse to answer +// list_services request. +message ServiceResponse { + // Full name of a registered service, including its package name. The format + // is . + string name = 1; +} + +// The error code and error message sent by the server when an error occurs. +message ErrorResponse { + // This field uses the error codes defined in grpc::StatusCode. + int32 error_code = 1; + string error_message = 2; +} diff --git a/packages/grpc-reflection/proto/sample/sample.proto b/packages/grpc-reflection/proto/sample/sample.proto new file mode 100644 index 000000000..acf969c1a --- /dev/null +++ b/packages/grpc-reflection/proto/sample/sample.proto @@ -0,0 +1,43 @@ +syntax = "proto3"; + +package sample; + +import 'vendor.proto'; + +service SampleService { + rpc Hello (HelloRequest) returns (HelloResponse) {} + rpc Hello2 (HelloRequest) returns (CommonMessage) {} +} + +service IgnoreService { + rpc Hello (HelloRequest) returns (HelloResponse) {} +} + +message HelloRequest { + string hello = 1; + HelloNested nested = 2; + ShadowedMessage nestedShadowedMessage = 3; + + message HelloNested { + string hello = 1; + CommonMessage field = 2; + } + + message ShadowedMessage { + int32 item = 1; + } +} + +enum HelloStatus { + HELLO = 1; + WORLD = 2; +} + +message HelloResponse { + string world = 1; + HelloStatus status = 2; +} + +message ShadowedMessage { + string hello = 1; +} diff --git a/packages/grpc-reflection/proto/sample/vendor/common.proto b/packages/grpc-reflection/proto/sample/vendor/common.proto new file mode 100644 index 000000000..c89246b5d --- /dev/null +++ b/packages/grpc-reflection/proto/sample/vendor/common.proto @@ -0,0 +1,15 @@ +syntax = "proto2"; + +// NOTE: intentionally using the same 'vendor' package here to document the +// file/package merging behavior of the reflection service. 
+// +// this file should be combined with vendor.proto to a single definition because +// it's under the same 'vendor' package +package vendor; + +message CommonMessage { + optional string common = 1; + optional DependentMessage dependency = 2; + + extensions 100 to 199; +} diff --git a/packages/grpc-reflection/proto/sample/vendor/dependency/dependency.proto b/packages/grpc-reflection/proto/sample/vendor/dependency/dependency.proto new file mode 100644 index 000000000..44cadfba1 --- /dev/null +++ b/packages/grpc-reflection/proto/sample/vendor/dependency/dependency.proto @@ -0,0 +1,7 @@ +syntax = "proto2"; + +package vendor.dependency; + +message DependentMessage { + optional string something = 1; +} diff --git a/packages/grpc-reflection/proto/sample/vendor/vendor.proto b/packages/grpc-reflection/proto/sample/vendor/vendor.proto new file mode 100644 index 000000000..f8e0c1e5a --- /dev/null +++ b/packages/grpc-reflection/proto/sample/vendor/vendor.proto @@ -0,0 +1,10 @@ +syntax = "proto2"; + +package vendor; + +import "./common.proto"; +import "./dependency/dependency.proto"; + +extend CommonMessage { + optional bool ext = 101; +} diff --git a/packages/grpc-reflection/src/generated/grpc/reflection/v1/ErrorResponse.ts b/packages/grpc-reflection/src/generated/grpc/reflection/v1/ErrorResponse.ts new file mode 100644 index 000000000..e8168c36d --- /dev/null +++ b/packages/grpc-reflection/src/generated/grpc/reflection/v1/ErrorResponse.ts @@ -0,0 +1,24 @@ +// Original file: proto/grpc/reflection/v1/reflection.proto + + +/** + * The error code and error message sent by the server when an error occurs. + */ +export interface ErrorResponse { + /** + * This field uses the error codes defined in grpc::StatusCode. + */ + 'errorCode'?: (number); + 'errorMessage'?: (string); +} + +/** + * The error code and error message sent by the server when an error occurs. + */ +export interface ErrorResponse__Output { + /** + * This field uses the error codes defined in grpc::StatusCode. + */ + 'errorCode': (number); + 'errorMessage': (string); +} diff --git a/packages/grpc-reflection/src/generated/grpc/reflection/v1/ExtensionNumberResponse.ts b/packages/grpc-reflection/src/generated/grpc/reflection/v1/ExtensionNumberResponse.ts new file mode 100644 index 000000000..fdb88119c --- /dev/null +++ b/packages/grpc-reflection/src/generated/grpc/reflection/v1/ExtensionNumberResponse.ts @@ -0,0 +1,28 @@ +// Original file: proto/grpc/reflection/v1/reflection.proto + + +/** + * A list of extension numbers sent by the server answering + * all_extension_numbers_of_type request. + */ +export interface ExtensionNumberResponse { + /** + * Full name of the base type, including the package name. The format + * is . + */ + 'baseTypeName'?: (string); + 'extensionNumber'?: (number)[]; +} + +/** + * A list of extension numbers sent by the server answering + * all_extension_numbers_of_type request. + */ +export interface ExtensionNumberResponse__Output { + /** + * Full name of the base type, including the package name. The format + * is . 
+ */ + 'baseTypeName': (string); + 'extensionNumber': (number)[]; +} diff --git a/packages/grpc-reflection/src/generated/grpc/reflection/v1/ExtensionRequest.ts b/packages/grpc-reflection/src/generated/grpc/reflection/v1/ExtensionRequest.ts new file mode 100644 index 000000000..34c6fefeb --- /dev/null +++ b/packages/grpc-reflection/src/generated/grpc/reflection/v1/ExtensionRequest.ts @@ -0,0 +1,26 @@ +// Original file: proto/grpc/reflection/v1/reflection.proto + + +/** + * The type name and extension number sent by the client when requesting + * file_containing_extension. + */ +export interface ExtensionRequest { + /** + * Fully-qualified type name. The format should be . + */ + 'containingType'?: (string); + 'extensionNumber'?: (number); +} + +/** + * The type name and extension number sent by the client when requesting + * file_containing_extension. + */ +export interface ExtensionRequest__Output { + /** + * Fully-qualified type name. The format should be . + */ + 'containingType': (string); + 'extensionNumber': (number); +} diff --git a/packages/grpc-reflection/src/generated/grpc/reflection/v1/FileDescriptorResponse.ts b/packages/grpc-reflection/src/generated/grpc/reflection/v1/FileDescriptorResponse.ts new file mode 100644 index 000000000..253e650f9 --- /dev/null +++ b/packages/grpc-reflection/src/generated/grpc/reflection/v1/FileDescriptorResponse.ts @@ -0,0 +1,30 @@ +// Original file: proto/grpc/reflection/v1/reflection.proto + + +/** + * Serialized FileDescriptorProto messages sent by the server answering + * a file_by_filename, file_containing_symbol, or file_containing_extension + * request. + */ +export interface FileDescriptorResponse { + /** + * Serialized FileDescriptorProto messages. We avoid taking a dependency on + * descriptor.proto, which uses proto2 only features, by making them opaque + * bytes instead. + */ + 'fileDescriptorProto'?: (Buffer | Uint8Array | string)[]; +} + +/** + * Serialized FileDescriptorProto messages sent by the server answering + * a file_by_filename, file_containing_symbol, or file_containing_extension + * request. + */ +export interface FileDescriptorResponse__Output { + /** + * Serialized FileDescriptorProto messages. We avoid taking a dependency on + * descriptor.proto, which uses proto2 only features, by making them opaque + * bytes instead. + */ + 'fileDescriptorProto': (Uint8Array)[]; +} diff --git a/packages/grpc-reflection/src/generated/grpc/reflection/v1/ListServiceResponse.ts b/packages/grpc-reflection/src/generated/grpc/reflection/v1/ListServiceResponse.ts new file mode 100644 index 000000000..f1824d4cf --- /dev/null +++ b/packages/grpc-reflection/src/generated/grpc/reflection/v1/ListServiceResponse.ts @@ -0,0 +1,25 @@ +// Original file: proto/grpc/reflection/v1/reflection.proto + +import type { ServiceResponse as _grpc_reflection_v1_ServiceResponse, ServiceResponse__Output as _grpc_reflection_v1_ServiceResponse__Output } from '../../../grpc/reflection/v1/ServiceResponse'; + +/** + * A list of ServiceResponse sent by the server answering list_services request. + */ +export interface ListServiceResponse { + /** + * The information of each service may be expanded in the future, so we use + * ServiceResponse message to encapsulate it. + */ + 'service'?: (_grpc_reflection_v1_ServiceResponse)[]; +} + +/** + * A list of ServiceResponse sent by the server answering list_services request. 
+ */ +export interface ListServiceResponse__Output { + /** + * The information of each service may be expanded in the future, so we use + * ServiceResponse message to encapsulate it. + */ + 'service': (_grpc_reflection_v1_ServiceResponse__Output)[]; +} diff --git a/packages/grpc-reflection/src/generated/grpc/reflection/v1/ServerReflection.ts b/packages/grpc-reflection/src/generated/grpc/reflection/v1/ServerReflection.ts new file mode 100644 index 000000000..65d3b571b --- /dev/null +++ b/packages/grpc-reflection/src/generated/grpc/reflection/v1/ServerReflection.ts @@ -0,0 +1,9 @@ +// Original file: proto/grpc/reflection/v1/reflection.proto + +import type { MethodDefinition } from '@grpc/proto-loader' +import type { ServerReflectionRequest as _grpc_reflection_v1_ServerReflectionRequest, ServerReflectionRequest__Output as _grpc_reflection_v1_ServerReflectionRequest__Output } from '../../../grpc/reflection/v1/ServerReflectionRequest'; +import type { ServerReflectionResponse as _grpc_reflection_v1_ServerReflectionResponse, ServerReflectionResponse__Output as _grpc_reflection_v1_ServerReflectionResponse__Output } from '../../../grpc/reflection/v1/ServerReflectionResponse'; + +export interface ServerReflectionDefinition { + ServerReflectionInfo: MethodDefinition<_grpc_reflection_v1_ServerReflectionRequest, _grpc_reflection_v1_ServerReflectionResponse, _grpc_reflection_v1_ServerReflectionRequest__Output, _grpc_reflection_v1_ServerReflectionResponse__Output> +} diff --git a/packages/grpc-reflection/src/generated/grpc/reflection/v1/ServerReflectionRequest.ts b/packages/grpc-reflection/src/generated/grpc/reflection/v1/ServerReflectionRequest.ts new file mode 100644 index 000000000..301bd3953 --- /dev/null +++ b/packages/grpc-reflection/src/generated/grpc/reflection/v1/ServerReflectionRequest.ts @@ -0,0 +1,91 @@ +// Original file: proto/grpc/reflection/v1/reflection.proto + +import type { ExtensionRequest as _grpc_reflection_v1_ExtensionRequest, ExtensionRequest__Output as _grpc_reflection_v1_ExtensionRequest__Output } from '../../../grpc/reflection/v1/ExtensionRequest'; + +/** + * The message sent by the client when calling ServerReflectionInfo method. + */ +export interface ServerReflectionRequest { + 'host'?: (string); + /** + * Find a proto file by the file name. + */ + 'fileByFilename'?: (string); + /** + * Find the proto file that declares the given fully-qualified symbol name. + * This field should be a fully-qualified symbol name + * (e.g. .[.] or .). + */ + 'fileContainingSymbol'?: (string); + /** + * Find the proto file which defines an extension extending the given + * message type with the given field number. + */ + 'fileContainingExtension'?: (_grpc_reflection_v1_ExtensionRequest | null); + /** + * Finds the tag numbers used by all known extensions of the given message + * type, and appends them to ExtensionNumberResponse in an undefined order. + * Its corresponding method is best-effort: it's not guaranteed that the + * reflection service will implement this method, and it's not guaranteed + * that this method will provide all extensions. Returns + * StatusCode::UNIMPLEMENTED if it's not implemented. + * This field should be a fully-qualified type name. The format is + * . + */ + 'allExtensionNumbersOfType'?: (string); + /** + * List the full names of registered services. The content will not be + * checked. + */ + 'listServices'?: (string); + /** + * To use reflection service, the client should set one of the following + * fields in message_request. 
The server distinguishes requests by their + * defined field and then handles them using corresponding methods. + */ + 'messageRequest'?: "fileByFilename"|"fileContainingSymbol"|"fileContainingExtension"|"allExtensionNumbersOfType"|"listServices"; +} + +/** + * The message sent by the client when calling ServerReflectionInfo method. + */ +export interface ServerReflectionRequest__Output { + 'host': (string); + /** + * Find a proto file by the file name. + */ + 'fileByFilename'?: (string); + /** + * Find the proto file that declares the given fully-qualified symbol name. + * This field should be a fully-qualified symbol name + * (e.g. .[.] or .). + */ + 'fileContainingSymbol'?: (string); + /** + * Find the proto file which defines an extension extending the given + * message type with the given field number. + */ + 'fileContainingExtension'?: (_grpc_reflection_v1_ExtensionRequest__Output | null); + /** + * Finds the tag numbers used by all known extensions of the given message + * type, and appends them to ExtensionNumberResponse in an undefined order. + * Its corresponding method is best-effort: it's not guaranteed that the + * reflection service will implement this method, and it's not guaranteed + * that this method will provide all extensions. Returns + * StatusCode::UNIMPLEMENTED if it's not implemented. + * This field should be a fully-qualified type name. The format is + * . + */ + 'allExtensionNumbersOfType'?: (string); + /** + * List the full names of registered services. The content will not be + * checked. + */ + 'listServices'?: (string); + /** + * To use reflection service, the client should set one of the following + * fields in message_request. The server distinguishes requests by their + * defined field and then handles them using corresponding methods. + */ + 'messageRequest': "fileByFilename"|"fileContainingSymbol"|"fileContainingExtension"|"allExtensionNumbersOfType"|"listServices"; +} diff --git a/packages/grpc-reflection/src/generated/grpc/reflection/v1/ServerReflectionResponse.ts b/packages/grpc-reflection/src/generated/grpc/reflection/v1/ServerReflectionResponse.ts new file mode 100644 index 000000000..bc2790c15 --- /dev/null +++ b/packages/grpc-reflection/src/generated/grpc/reflection/v1/ServerReflectionResponse.ts @@ -0,0 +1,75 @@ +// Original file: proto/grpc/reflection/v1/reflection.proto + +import type { ServerReflectionRequest as _grpc_reflection_v1_ServerReflectionRequest, ServerReflectionRequest__Output as _grpc_reflection_v1_ServerReflectionRequest__Output } from '../../../grpc/reflection/v1/ServerReflectionRequest'; +import type { FileDescriptorResponse as _grpc_reflection_v1_FileDescriptorResponse, FileDescriptorResponse__Output as _grpc_reflection_v1_FileDescriptorResponse__Output } from '../../../grpc/reflection/v1/FileDescriptorResponse'; +import type { ExtensionNumberResponse as _grpc_reflection_v1_ExtensionNumberResponse, ExtensionNumberResponse__Output as _grpc_reflection_v1_ExtensionNumberResponse__Output } from '../../../grpc/reflection/v1/ExtensionNumberResponse'; +import type { ListServiceResponse as _grpc_reflection_v1_ListServiceResponse, ListServiceResponse__Output as _grpc_reflection_v1_ListServiceResponse__Output } from '../../../grpc/reflection/v1/ListServiceResponse'; +import type { ErrorResponse as _grpc_reflection_v1_ErrorResponse, ErrorResponse__Output as _grpc_reflection_v1_ErrorResponse__Output } from '../../../grpc/reflection/v1/ErrorResponse'; + +/** + * The message sent by the server to answer ServerReflectionInfo method. 
+ */ +export interface ServerReflectionResponse { + 'validHost'?: (string); + 'originalRequest'?: (_grpc_reflection_v1_ServerReflectionRequest | null); + /** + * This message is used to answer file_by_filename, file_containing_symbol, + * file_containing_extension requests with transitive dependencies. + * As the repeated label is not allowed in oneof fields, we use a + * FileDescriptorResponse message to encapsulate the repeated fields. + * The reflection service is allowed to avoid sending FileDescriptorProtos + * that were previously sent in response to earlier requests in the stream. + */ + 'fileDescriptorResponse'?: (_grpc_reflection_v1_FileDescriptorResponse | null); + /** + * This message is used to answer all_extension_numbers_of_type requests. + */ + 'allExtensionNumbersResponse'?: (_grpc_reflection_v1_ExtensionNumberResponse | null); + /** + * This message is used to answer list_services requests. + */ + 'listServicesResponse'?: (_grpc_reflection_v1_ListServiceResponse | null); + /** + * This message is used when an error occurs. + */ + 'errorResponse'?: (_grpc_reflection_v1_ErrorResponse | null); + /** + * The server sets one of the following fields according to the message_request + * in the request. + */ + 'messageResponse'?: "fileDescriptorResponse"|"allExtensionNumbersResponse"|"listServicesResponse"|"errorResponse"; +} + +/** + * The message sent by the server to answer ServerReflectionInfo method. + */ +export interface ServerReflectionResponse__Output { + 'validHost': (string); + 'originalRequest': (_grpc_reflection_v1_ServerReflectionRequest__Output | null); + /** + * This message is used to answer file_by_filename, file_containing_symbol, + * file_containing_extension requests with transitive dependencies. + * As the repeated label is not allowed in oneof fields, we use a + * FileDescriptorResponse message to encapsulate the repeated fields. + * The reflection service is allowed to avoid sending FileDescriptorProtos + * that were previously sent in response to earlier requests in the stream. + */ + 'fileDescriptorResponse'?: (_grpc_reflection_v1_FileDescriptorResponse__Output | null); + /** + * This message is used to answer all_extension_numbers_of_type requests. + */ + 'allExtensionNumbersResponse'?: (_grpc_reflection_v1_ExtensionNumberResponse__Output | null); + /** + * This message is used to answer list_services requests. + */ + 'listServicesResponse'?: (_grpc_reflection_v1_ListServiceResponse__Output | null); + /** + * This message is used when an error occurs. + */ + 'errorResponse'?: (_grpc_reflection_v1_ErrorResponse__Output | null); + /** + * The server sets one of the following fields according to the message_request + * in the request. + */ + 'messageResponse': "fileDescriptorResponse"|"allExtensionNumbersResponse"|"listServicesResponse"|"errorResponse"; +} diff --git a/packages/grpc-reflection/src/generated/grpc/reflection/v1/ServiceResponse.ts b/packages/grpc-reflection/src/generated/grpc/reflection/v1/ServiceResponse.ts new file mode 100644 index 000000000..d529538e2 --- /dev/null +++ b/packages/grpc-reflection/src/generated/grpc/reflection/v1/ServiceResponse.ts @@ -0,0 +1,26 @@ +// Original file: proto/grpc/reflection/v1/reflection.proto + + +/** + * The information of a single service used by ListServiceResponse to answer + * list_services request. + */ +export interface ServiceResponse { + /** + * Full name of a registered service, including its package name. The format + * is . 
+ */ + 'name'?: (string); +} + +/** + * The information of a single service used by ListServiceResponse to answer + * list_services request. + */ +export interface ServiceResponse__Output { + /** + * Full name of a registered service, including its package name. The format + * is . + */ + 'name': (string); +} diff --git a/packages/grpc-reflection/src/generated/grpc/reflection/v1alpha/ErrorResponse.ts b/packages/grpc-reflection/src/generated/grpc/reflection/v1alpha/ErrorResponse.ts new file mode 100644 index 000000000..dc6c3a2e2 --- /dev/null +++ b/packages/grpc-reflection/src/generated/grpc/reflection/v1alpha/ErrorResponse.ts @@ -0,0 +1,24 @@ +// Original file: proto/grpc/reflection/v1alpha/reflection.proto + + +/** + * The error code and error message sent by the server when an error occurs. + */ +export interface ErrorResponse { + /** + * This field uses the error codes defined in grpc::StatusCode. + */ + 'errorCode'?: (number); + 'errorMessage'?: (string); +} + +/** + * The error code and error message sent by the server when an error occurs. + */ +export interface ErrorResponse__Output { + /** + * This field uses the error codes defined in grpc::StatusCode. + */ + 'errorCode': (number); + 'errorMessage': (string); +} diff --git a/packages/grpc-reflection/src/generated/grpc/reflection/v1alpha/ExtensionNumberResponse.ts b/packages/grpc-reflection/src/generated/grpc/reflection/v1alpha/ExtensionNumberResponse.ts new file mode 100644 index 000000000..b6c322c4e --- /dev/null +++ b/packages/grpc-reflection/src/generated/grpc/reflection/v1alpha/ExtensionNumberResponse.ts @@ -0,0 +1,28 @@ +// Original file: proto/grpc/reflection/v1alpha/reflection.proto + + +/** + * A list of extension numbers sent by the server answering + * all_extension_numbers_of_type request. + */ +export interface ExtensionNumberResponse { + /** + * Full name of the base type, including the package name. The format + * is . + */ + 'baseTypeName'?: (string); + 'extensionNumber'?: (number)[]; +} + +/** + * A list of extension numbers sent by the server answering + * all_extension_numbers_of_type request. + */ +export interface ExtensionNumberResponse__Output { + /** + * Full name of the base type, including the package name. The format + * is . + */ + 'baseTypeName': (string); + 'extensionNumber': (number)[]; +} diff --git a/packages/grpc-reflection/src/generated/grpc/reflection/v1alpha/ExtensionRequest.ts b/packages/grpc-reflection/src/generated/grpc/reflection/v1alpha/ExtensionRequest.ts new file mode 100644 index 000000000..4a378b35a --- /dev/null +++ b/packages/grpc-reflection/src/generated/grpc/reflection/v1alpha/ExtensionRequest.ts @@ -0,0 +1,26 @@ +// Original file: proto/grpc/reflection/v1alpha/reflection.proto + + +/** + * The type name and extension number sent by the client when requesting + * file_containing_extension. + */ +export interface ExtensionRequest { + /** + * Fully-qualified type name. The format should be . + */ + 'containingType'?: (string); + 'extensionNumber'?: (number); +} + +/** + * The type name and extension number sent by the client when requesting + * file_containing_extension. + */ +export interface ExtensionRequest__Output { + /** + * Fully-qualified type name. The format should be . 
+ */ + 'containingType': (string); + 'extensionNumber': (number); +} diff --git a/packages/grpc-reflection/src/generated/grpc/reflection/v1alpha/FileDescriptorResponse.ts b/packages/grpc-reflection/src/generated/grpc/reflection/v1alpha/FileDescriptorResponse.ts new file mode 100644 index 000000000..cb1cc38c4 --- /dev/null +++ b/packages/grpc-reflection/src/generated/grpc/reflection/v1alpha/FileDescriptorResponse.ts @@ -0,0 +1,30 @@ +// Original file: proto/grpc/reflection/v1alpha/reflection.proto + + +/** + * Serialized FileDescriptorProto messages sent by the server answering + * a file_by_filename, file_containing_symbol, or file_containing_extension + * request. + */ +export interface FileDescriptorResponse { + /** + * Serialized FileDescriptorProto messages. We avoid taking a dependency on + * descriptor.proto, which uses proto2 only features, by making them opaque + * bytes instead. + */ + 'fileDescriptorProto'?: (Buffer | Uint8Array | string)[]; +} + +/** + * Serialized FileDescriptorProto messages sent by the server answering + * a file_by_filename, file_containing_symbol, or file_containing_extension + * request. + */ +export interface FileDescriptorResponse__Output { + /** + * Serialized FileDescriptorProto messages. We avoid taking a dependency on + * descriptor.proto, which uses proto2 only features, by making them opaque + * bytes instead. + */ + 'fileDescriptorProto': (Uint8Array)[]; +} diff --git a/packages/grpc-reflection/src/generated/grpc/reflection/v1alpha/ListServiceResponse.ts b/packages/grpc-reflection/src/generated/grpc/reflection/v1alpha/ListServiceResponse.ts new file mode 100644 index 000000000..7793a16eb --- /dev/null +++ b/packages/grpc-reflection/src/generated/grpc/reflection/v1alpha/ListServiceResponse.ts @@ -0,0 +1,25 @@ +// Original file: proto/grpc/reflection/v1alpha/reflection.proto + +import type { ServiceResponse as _grpc_reflection_v1alpha_ServiceResponse, ServiceResponse__Output as _grpc_reflection_v1alpha_ServiceResponse__Output } from '../../../grpc/reflection/v1alpha/ServiceResponse'; + +/** + * A list of ServiceResponse sent by the server answering list_services request. + */ +export interface ListServiceResponse { + /** + * The information of each service may be expanded in the future, so we use + * ServiceResponse message to encapsulate it. + */ + 'service'?: (_grpc_reflection_v1alpha_ServiceResponse)[]; +} + +/** + * A list of ServiceResponse sent by the server answering list_services request. + */ +export interface ListServiceResponse__Output { + /** + * The information of each service may be expanded in the future, so we use + * ServiceResponse message to encapsulate it. 
+ */ + 'service': (_grpc_reflection_v1alpha_ServiceResponse__Output)[]; +} diff --git a/packages/grpc-reflection/src/generated/grpc/reflection/v1alpha/ServerReflection.ts b/packages/grpc-reflection/src/generated/grpc/reflection/v1alpha/ServerReflection.ts new file mode 100644 index 000000000..2ab03e93c --- /dev/null +++ b/packages/grpc-reflection/src/generated/grpc/reflection/v1alpha/ServerReflection.ts @@ -0,0 +1,9 @@ +// Original file: proto/grpc/reflection/v1alpha/reflection.proto + +import type { MethodDefinition } from '@grpc/proto-loader' +import type { ServerReflectionRequest as _grpc_reflection_v1alpha_ServerReflectionRequest, ServerReflectionRequest__Output as _grpc_reflection_v1alpha_ServerReflectionRequest__Output } from '../../../grpc/reflection/v1alpha/ServerReflectionRequest'; +import type { ServerReflectionResponse as _grpc_reflection_v1alpha_ServerReflectionResponse, ServerReflectionResponse__Output as _grpc_reflection_v1alpha_ServerReflectionResponse__Output } from '../../../grpc/reflection/v1alpha/ServerReflectionResponse'; + +export interface ServerReflectionDefinition { + ServerReflectionInfo: MethodDefinition<_grpc_reflection_v1alpha_ServerReflectionRequest, _grpc_reflection_v1alpha_ServerReflectionResponse, _grpc_reflection_v1alpha_ServerReflectionRequest__Output, _grpc_reflection_v1alpha_ServerReflectionResponse__Output> +} diff --git a/packages/grpc-reflection/src/generated/grpc/reflection/v1alpha/ServerReflectionRequest.ts b/packages/grpc-reflection/src/generated/grpc/reflection/v1alpha/ServerReflectionRequest.ts new file mode 100644 index 000000000..097d848d0 --- /dev/null +++ b/packages/grpc-reflection/src/generated/grpc/reflection/v1alpha/ServerReflectionRequest.ts @@ -0,0 +1,91 @@ +// Original file: proto/grpc/reflection/v1alpha/reflection.proto + +import type { ExtensionRequest as _grpc_reflection_v1alpha_ExtensionRequest, ExtensionRequest__Output as _grpc_reflection_v1alpha_ExtensionRequest__Output } from '../../../grpc/reflection/v1alpha/ExtensionRequest'; + +/** + * The message sent by the client when calling ServerReflectionInfo method. + */ +export interface ServerReflectionRequest { + 'host'?: (string); + /** + * Find a proto file by the file name. + */ + 'fileByFilename'?: (string); + /** + * Find the proto file that declares the given fully-qualified symbol name. + * This field should be a fully-qualified symbol name + * (e.g. .[.] or .). + */ + 'fileContainingSymbol'?: (string); + /** + * Find the proto file which defines an extension extending the given + * message type with the given field number. + */ + 'fileContainingExtension'?: (_grpc_reflection_v1alpha_ExtensionRequest | null); + /** + * Finds the tag numbers used by all known extensions of the given message + * type, and appends them to ExtensionNumberResponse in an undefined order. + * Its corresponding method is best-effort: it's not guaranteed that the + * reflection service will implement this method, and it's not guaranteed + * that this method will provide all extensions. Returns + * StatusCode::UNIMPLEMENTED if it's not implemented. + * This field should be a fully-qualified type name. The format is + * . + */ + 'allExtensionNumbersOfType'?: (string); + /** + * List the full names of registered services. The content will not be + * checked. + */ + 'listServices'?: (string); + /** + * To use reflection service, the client should set one of the following + * fields in message_request. 
The server distinguishes requests by their + * defined field and then handles them using corresponding methods. + */ + 'messageRequest'?: "fileByFilename"|"fileContainingSymbol"|"fileContainingExtension"|"allExtensionNumbersOfType"|"listServices"; +} + +/** + * The message sent by the client when calling ServerReflectionInfo method. + */ +export interface ServerReflectionRequest__Output { + 'host': (string); + /** + * Find a proto file by the file name. + */ + 'fileByFilename'?: (string); + /** + * Find the proto file that declares the given fully-qualified symbol name. + * This field should be a fully-qualified symbol name + * (e.g. .[.] or .). + */ + 'fileContainingSymbol'?: (string); + /** + * Find the proto file which defines an extension extending the given + * message type with the given field number. + */ + 'fileContainingExtension'?: (_grpc_reflection_v1alpha_ExtensionRequest__Output | null); + /** + * Finds the tag numbers used by all known extensions of the given message + * type, and appends them to ExtensionNumberResponse in an undefined order. + * Its corresponding method is best-effort: it's not guaranteed that the + * reflection service will implement this method, and it's not guaranteed + * that this method will provide all extensions. Returns + * StatusCode::UNIMPLEMENTED if it's not implemented. + * This field should be a fully-qualified type name. The format is + * . + */ + 'allExtensionNumbersOfType'?: (string); + /** + * List the full names of registered services. The content will not be + * checked. + */ + 'listServices'?: (string); + /** + * To use reflection service, the client should set one of the following + * fields in message_request. The server distinguishes requests by their + * defined field and then handles them using corresponding methods. 
+ */ + 'messageRequest': "fileByFilename"|"fileContainingSymbol"|"fileContainingExtension"|"allExtensionNumbersOfType"|"listServices"; +} diff --git a/packages/grpc-reflection/src/generated/grpc/reflection/v1alpha/ServerReflectionResponse.ts b/packages/grpc-reflection/src/generated/grpc/reflection/v1alpha/ServerReflectionResponse.ts new file mode 100644 index 000000000..eb81a0c2a --- /dev/null +++ b/packages/grpc-reflection/src/generated/grpc/reflection/v1alpha/ServerReflectionResponse.ts @@ -0,0 +1,75 @@ +// Original file: proto/grpc/reflection/v1alpha/reflection.proto + +import type { ServerReflectionRequest as _grpc_reflection_v1alpha_ServerReflectionRequest, ServerReflectionRequest__Output as _grpc_reflection_v1alpha_ServerReflectionRequest__Output } from '../../../grpc/reflection/v1alpha/ServerReflectionRequest'; +import type { FileDescriptorResponse as _grpc_reflection_v1alpha_FileDescriptorResponse, FileDescriptorResponse__Output as _grpc_reflection_v1alpha_FileDescriptorResponse__Output } from '../../../grpc/reflection/v1alpha/FileDescriptorResponse'; +import type { ExtensionNumberResponse as _grpc_reflection_v1alpha_ExtensionNumberResponse, ExtensionNumberResponse__Output as _grpc_reflection_v1alpha_ExtensionNumberResponse__Output } from '../../../grpc/reflection/v1alpha/ExtensionNumberResponse'; +import type { ListServiceResponse as _grpc_reflection_v1alpha_ListServiceResponse, ListServiceResponse__Output as _grpc_reflection_v1alpha_ListServiceResponse__Output } from '../../../grpc/reflection/v1alpha/ListServiceResponse'; +import type { ErrorResponse as _grpc_reflection_v1alpha_ErrorResponse, ErrorResponse__Output as _grpc_reflection_v1alpha_ErrorResponse__Output } from '../../../grpc/reflection/v1alpha/ErrorResponse'; + +/** + * The message sent by the server to answer ServerReflectionInfo method. + */ +export interface ServerReflectionResponse { + 'validHost'?: (string); + 'originalRequest'?: (_grpc_reflection_v1alpha_ServerReflectionRequest | null); + /** + * This message is used to answer file_by_filename, file_containing_symbol, + * file_containing_extension requests with transitive dependencies. As + * the repeated label is not allowed in oneof fields, we use a + * FileDescriptorResponse message to encapsulate the repeated fields. + * The reflection service is allowed to avoid sending FileDescriptorProtos + * that were previously sent in response to earlier requests in the stream. + */ + 'fileDescriptorResponse'?: (_grpc_reflection_v1alpha_FileDescriptorResponse | null); + /** + * This message is used to answer all_extension_numbers_of_type requst. + */ + 'allExtensionNumbersResponse'?: (_grpc_reflection_v1alpha_ExtensionNumberResponse | null); + /** + * This message is used to answer list_services request. + */ + 'listServicesResponse'?: (_grpc_reflection_v1alpha_ListServiceResponse | null); + /** + * This message is used when an error occurs. + */ + 'errorResponse'?: (_grpc_reflection_v1alpha_ErrorResponse | null); + /** + * The server set one of the following fields accroding to the message_request + * in the request. + */ + 'messageResponse'?: "fileDescriptorResponse"|"allExtensionNumbersResponse"|"listServicesResponse"|"errorResponse"; +} + +/** + * The message sent by the server to answer ServerReflectionInfo method. 
+ */ +export interface ServerReflectionResponse__Output { + 'validHost': (string); + 'originalRequest': (_grpc_reflection_v1alpha_ServerReflectionRequest__Output | null); + /** + * This message is used to answer file_by_filename, file_containing_symbol, + * file_containing_extension requests with transitive dependencies. As + * the repeated label is not allowed in oneof fields, we use a + * FileDescriptorResponse message to encapsulate the repeated fields. + * The reflection service is allowed to avoid sending FileDescriptorProtos + * that were previously sent in response to earlier requests in the stream. + */ + 'fileDescriptorResponse'?: (_grpc_reflection_v1alpha_FileDescriptorResponse__Output | null); + /** + * This message is used to answer all_extension_numbers_of_type requst. + */ + 'allExtensionNumbersResponse'?: (_grpc_reflection_v1alpha_ExtensionNumberResponse__Output | null); + /** + * This message is used to answer list_services request. + */ + 'listServicesResponse'?: (_grpc_reflection_v1alpha_ListServiceResponse__Output | null); + /** + * This message is used when an error occurs. + */ + 'errorResponse'?: (_grpc_reflection_v1alpha_ErrorResponse__Output | null); + /** + * The server set one of the following fields accroding to the message_request + * in the request. + */ + 'messageResponse': "fileDescriptorResponse"|"allExtensionNumbersResponse"|"listServicesResponse"|"errorResponse"; +} diff --git a/packages/grpc-reflection/src/generated/grpc/reflection/v1alpha/ServiceResponse.ts b/packages/grpc-reflection/src/generated/grpc/reflection/v1alpha/ServiceResponse.ts new file mode 100644 index 000000000..ff35cf522 --- /dev/null +++ b/packages/grpc-reflection/src/generated/grpc/reflection/v1alpha/ServiceResponse.ts @@ -0,0 +1,26 @@ +// Original file: proto/grpc/reflection/v1alpha/reflection.proto + + +/** + * The information of a single service used by ListServiceResponse to answer + * list_services request. + */ +export interface ServiceResponse { + /** + * Full name of a registered service, including its package name. The format + * is . + */ + 'name'?: (string); +} + +/** + * The information of a single service used by ListServiceResponse to answer + * list_services request. + */ +export interface ServiceResponse__Output { + /** + * Full name of a registered service, including its package name. The format + * is . 
+ */ + 'name': (string); +} diff --git a/packages/grpc-reflection/src/implementations/common/constants.ts b/packages/grpc-reflection/src/implementations/common/constants.ts new file mode 100644 index 000000000..93ad27825 --- /dev/null +++ b/packages/grpc-reflection/src/implementations/common/constants.ts @@ -0,0 +1,14 @@ +import * as protoLoader from '@grpc/proto-loader'; + +/** Options to use when loading protobuf files in this repo +* +* @remarks *must* match the proto-loader-gen-types usage in the package.json +* otherwise the generated types may not match the data coming into this service +*/ +export const PROTO_LOADER_OPTS: protoLoader.Options = { + longs: String, + enums: String, + bytes: Array, + defaults: true, + oneofs: true +}; diff --git a/packages/grpc-reflection/src/implementations/common/interfaces.ts b/packages/grpc-reflection/src/implementations/common/interfaces.ts new file mode 100644 index 000000000..c6b07bccb --- /dev/null +++ b/packages/grpc-reflection/src/implementations/common/interfaces.ts @@ -0,0 +1,5 @@ +/** Options to create a reflection server */ +export interface ReflectionServerOptions { + /** whitelist of fully-qualified service names to expose. (Default: expose all) */ + services?: string[]; +} diff --git a/packages/grpc-reflection/src/implementations/common/protobuf-visitor.ts b/packages/grpc-reflection/src/implementations/common/protobuf-visitor.ts new file mode 100644 index 000000000..451a2c0ea --- /dev/null +++ b/packages/grpc-reflection/src/implementations/common/protobuf-visitor.ts @@ -0,0 +1,109 @@ +import { + IDescriptorProto, + IEnumDescriptorProto, + IEnumValueDescriptorProto, + IFieldDescriptorProto, + IFileDescriptorProto, + IMethodDescriptorProto, + IOneofDescriptorProto, + IServiceDescriptorProto, +} from 'protobufjs/ext/descriptor'; + +/** A set of functions for operating on protobuf objects as we visit them in a traversal */ +interface Visitor { + field?: (fqn: string, file: IFileDescriptorProto, field: IFieldDescriptorProto) => void; + extension?: (fqn: string, file: IFileDescriptorProto, extension: IFieldDescriptorProto) => void; + oneOf?: (fqn: string, file: IFileDescriptorProto, decl: IOneofDescriptorProto) => void; + message?: (fqn: string, file: IFileDescriptorProto, msg: IDescriptorProto) => void; + enum?: (fqn: string, file: IFileDescriptorProto, msg: IEnumDescriptorProto) => void; + enumValue?: (fqn: string, file: IFileDescriptorProto, msg: IEnumValueDescriptorProto) => void; + service?: (fqn: string, file: IFileDescriptorProto, msg: IServiceDescriptorProto) => void; + method?: (fqn: string, file: IFileDescriptorProto, method: IMethodDescriptorProto) => void; +} + +/** Visit each node in a protobuf file and perform an operation on it + * + * This is useful because protocol buffers has nested objects so if we need to + * traverse them multiple times then we don't want to duplicate that traversal + * logic + * + * @see Visitor for the interface to interact with the nodes + */ +export const visit = (file: IFileDescriptorProto, visitor: Visitor): void => { + const processField = (prefix: string, file: IFileDescriptorProto, field: IFieldDescriptorProto) => { + const fqn = `${prefix}.${field.name}`; + if (visitor.field) { + visitor.field(fqn, file, field); + } + }; + + const processExtension = ( + prefix: string, + file: IFileDescriptorProto, + ext: IFieldDescriptorProto, + ) => { + const fqn = `${prefix}.${ext.name}`; + if (visitor.extension) { + visitor.extension(fqn, file, ext); + } + }; + + const processOneOf = (prefix: string, file: 
IFileDescriptorProto, decl: IOneofDescriptorProto) => { + const fqn = `${prefix}.${decl.name}`; + if (visitor.oneOf) { + visitor.oneOf(fqn, file, decl); + } + }; + + const processEnum = (prefix: string, file: IFileDescriptorProto, decl: IEnumDescriptorProto) => { + const fqn = `${prefix}.${decl.name}`; + + if (visitor.enum) { + visitor.enum(fqn, file, decl); + } + + decl.value?.forEach((value) => { + const valueFqn = `${fqn}.${value.name}`; + if (visitor.enumValue) { + visitor.enumValue(valueFqn, file, value); + } + }); + }; + + const processMessage = (prefix: string, file: IFileDescriptorProto, msg: IDescriptorProto) => { + const fqn = `${prefix}.${msg.name}`; + if (visitor.message) { + visitor.message(fqn, file, msg); + } + + msg.nestedType?.forEach((type) => processMessage(fqn, file, type)); + msg.enumType?.forEach((type) => processEnum(fqn, file, type)); + msg.field?.forEach((field) => processField(fqn, file, field)); + msg.oneofDecl?.forEach((decl) => processOneOf(fqn, file, decl)); + msg.extension?.forEach((ext) => processExtension(fqn, file, ext)); + }; + + const processService = ( + prefix: string, + file: IFileDescriptorProto, + service: IServiceDescriptorProto, + ) => { + const fqn = `${prefix}.${service.name}`; + if (visitor.service) { + visitor.service(fqn, file, service); + } + + service.method?.forEach((method) => { + const methodFqn = `${fqn}.${method.name}`; + if (visitor.method) { + visitor.method(methodFqn, file, method); + } + }); + }; + + const packageName = file.package || ''; + file.enumType?.forEach((type) => processEnum(packageName, file, type)); + file.messageType?.forEach((type) => processMessage(packageName, file, type)); + file.service?.forEach((service) => processService(packageName, file, service)); + file.extension?.forEach((ext) => processExtension(packageName, file, ext)); +}; diff --git a/packages/grpc-reflection/src/implementations/common/utils.ts b/packages/grpc-reflection/src/implementations/common/utils.ts new file mode 100644 index 000000000..4490abd6e --- /dev/null +++ b/packages/grpc-reflection/src/implementations/common/utils.ts @@ -0,0 +1,11 @@ +/** Gets the package scope for a type name + * + * @example scope('grpc.reflection.v1.Type') == 'grpc.reflection.v1' + */ +export const scope = (path: string, separator: string = '.') => { + if (!path.includes(separator)) { + return ''; + } + + return path.split(separator).slice(0, -1).join(separator); +}; diff --git a/packages/grpc-reflection/src/implementations/reflection-v1.ts b/packages/grpc-reflection/src/implementations/reflection-v1.ts new file mode 100644 index 000000000..3858d7f42 --- /dev/null +++ b/packages/grpc-reflection/src/implementations/reflection-v1.ts @@ -0,0 +1,333 @@ +import * as path from 'path'; +import { + FileDescriptorProto, + IFileDescriptorProto, + IServiceDescriptorProto +} from 'protobufjs/ext/descriptor'; + +import * as grpc from '@grpc/grpc-js'; +import * as protoLoader from '@grpc/proto-loader'; + +import { ExtensionNumberResponse__Output } from '../generated/grpc/reflection/v1/ExtensionNumberResponse'; +import { FileDescriptorResponse__Output } from '../generated/grpc/reflection/v1/FileDescriptorResponse'; +import { ListServiceResponse__Output } from '../generated/grpc/reflection/v1/ListServiceResponse'; +import { ServerReflectionRequest } from '../generated/grpc/reflection/v1/ServerReflectionRequest'; +import { ServerReflectionResponse } from '../generated/grpc/reflection/v1/ServerReflectionResponse'; +import { visit } from './common/protobuf-visitor'; +import { scope } 
from './common/utils'; +import { PROTO_LOADER_OPTS } from './common/constants'; +import { ReflectionServerOptions } from './common/interfaces'; + +export class ReflectionError extends Error { + constructor( + readonly statusCode: grpc.status, + readonly message: string, + ) { + super(message); + } +} + +/** Analyzes a gRPC package definition and exposes methods to reflect on it + * + * NOTE: the files returned by this service may not match the handwritten ones 1:1. + * This is because proto-loader reorients files based on their package definition, + * combining any that have the same package. + * + * For example: if files 'a.proto' and 'b.proto' are both for the same package 'c' then + * we will always return a reference to a combined 'c.proto' instead of the 2 files. + */ +export class ReflectionV1Implementation { + + /** An index of proto files by file name (eg. 'sample.proto') */ + private readonly files: Record<string, IFileDescriptorProto> = {}; + + /** A graph of file dependencies */ + private readonly fileDependencies = new Map<IFileDescriptorProto, IFileDescriptorProto[]>(); + + /** Pre-computed encoded versions of each file */ + private readonly fileEncodings = new Map<IFileDescriptorProto, Uint8Array>(); + + /** An index of proto files by type extension relationship + * + * extensionIndex[<package>.<type>][<number>] contains a reference to the file containing an + * extension for the type "<package>.<type>" and field number "<number>" + */ + private readonly extensions: Record<string, Record<number, IFileDescriptorProto>> = {}; + + /** An index of fully qualified symbol names (eg. 'sample.Message') to the files that contain them */ + private readonly symbols: Record<string, IFileDescriptorProto> = {}; + + /** An index of the services in the analyzed package(s) */ + private readonly services: Record<string, IServiceDescriptorProto> = {}; + + + constructor(root: protoLoader.PackageDefinition, options?: ReflectionServerOptions) { + Object.values(root).forEach(({ fileDescriptorProtos }) => { + if (Array.isArray(fileDescriptorProtos)) { // we use an array check to narrow the type + fileDescriptorProtos.forEach((bin) => { + const proto = FileDescriptorProto.decode(bin) as IFileDescriptorProto; + + if (proto.name && !this.files[proto.name]) { + this.files[proto.name] = proto; + } + }); + } + }); + + // Pass 1: Index Values + const serviceWhitelist = new Set(options?.services); + const index = (fqn: string, file: IFileDescriptorProto) => (this.symbols[fqn] = file); + Object.values(this.files).forEach((file) => + visit(file, { + field: index, + oneOf: index, + message: index, + method: index, + enum: index, + enumValue: index, + service: (fqn, file, service) => { + index(fqn, file); + + if (options?.services === undefined || serviceWhitelist.has(fqn)) { + this.services[fqn] = service; + } + }, + extension: (fqn, file, ext) => { + index(fqn, file); + + const extendeeName = ext.extendee || ''; + this.extensions[extendeeName] = { + ...(this.extensions[extendeeName] || {}), + [ext.number || -1]: file, + }; + }, + }), + ); + + // Pass 2: Link References To Values + // NOTE: this should be unnecessary after https://github.com/grpc/grpc-node/issues/2595 is resolved + const addReference = (ref: string, sourceFile: IFileDescriptorProto, pkgScope: string) => { + if (!ref) { + return; // nothing to do + } + + let referencedFile: IFileDescriptorProto | null = null; + if (ref.startsWith('.')) { + // absolute reference -- just remove the leading '.'
and use the ref directly + referencedFile = this.symbols[ref.replace(/^\./, '')]; + } else { + // relative reference -- need to seek upwards through the current package scope until we find it + let pkg = pkgScope; + while (pkg && !referencedFile) { + referencedFile = this.symbols[`${pkg}.${ref}`]; + pkg = scope(pkg); + } + + // if we didn't find anything then try just a FQN lookup + if (!referencedFile) { + referencedFile = this.symbols[ref]; + } + } + + if (!referencedFile) { + console.warn(`Could not find file associated with reference ${ref}`); + return; + } + + if (referencedFile !== sourceFile) { + const existingDeps = this.fileDependencies.get(sourceFile) || []; + this.fileDependencies.set(sourceFile, [referencedFile, ...existingDeps]); + } + }; + + Object.values(this.files).forEach((file) => + visit(file, { + field: (fqn, file, field) => addReference(field.typeName || '', file, scope(fqn)), + extension: (fqn, file, ext) => addReference(ext.typeName || '', file, scope(fqn)), + method: (fqn, file, method) => { + addReference(method.inputType || '', file, scope(fqn)); + addReference(method.outputType || '', file, scope(fqn)); + }, + }), + ); + + // Pass 3: pre-compute file encoding since that can be slow and is done frequently + Object.values(this.files).forEach(file => { + this.fileEncodings.set(file, FileDescriptorProto.encode(file).finish()) + }); + } + + addToServer(server: Pick<grpc.Server, 'addService'>) { + const protoPath = path.join(__dirname, '../../proto/grpc/reflection/v1/reflection.proto'); + const pkgDefinition = protoLoader.loadSync(protoPath, PROTO_LOADER_OPTS); + const pkg = grpc.loadPackageDefinition(pkgDefinition) as any; + + server.addService(pkg.grpc.reflection.v1.ServerReflection.service, { + ServerReflectionInfo: ( + stream: grpc.ServerDuplexStream<ServerReflectionRequest, ServerReflectionResponse> + ) => { + stream.on('end', () => stream.end()); + + stream.on('data', (message: ServerReflectionRequest) => { + stream.write(this.handleServerReflectionRequest(message)); + }); + } + }); + } + + /** Assemble a response for a single server reflection request in the stream */ + handleServerReflectionRequest(message: ServerReflectionRequest): ServerReflectionResponse { + const response: ServerReflectionResponse = { + validHost: message.host, + originalRequest: message + }; + + try { + switch(message.messageRequest) { + case 'listServices': + response.listServicesResponse = this.listServices(message.listServices || ''); + break; + case 'fileContainingSymbol': + response.fileDescriptorResponse = this.fileContainingSymbol(message.fileContainingSymbol || ''); + break; + case 'fileByFilename': + response.fileDescriptorResponse = this.fileByFilename(message.fileByFilename || ''); + break; + case 'fileContainingExtension': + response.fileDescriptorResponse = this.fileContainingExtension( + message.fileContainingExtension?.containingType || '', + message.fileContainingExtension?.extensionNumber || -1 + ); + break; + case 'allExtensionNumbersOfType': + response.allExtensionNumbersResponse = this.allExtensionNumbersOfType(message.allExtensionNumbersOfType || ''); + break; + default: + throw new ReflectionError( + grpc.status.UNIMPLEMENTED, + `Unimplemented method for request: ${message.messageRequest}`, + ); + } + } catch (e) { + if (e instanceof ReflectionError) { + response.errorResponse = { + errorCode: e.statusCode, + errorMessage: e.message, + }; + } else { + response.errorResponse = { + errorCode: grpc.status.UNKNOWN, + errorMessage: 'Failed to process gRPC reflection request: unknown error', + }; + } + } + + return response; + } + + /** List the full 
names of registered gRPC services + * + * note: the spec is unclear as to what the 'listServices' param can be; most + * clients seem to only pass '*', but it is unclear whether this should behave like a + * filter. Until we know how this should behave with different inputs, this + * just always returns *all* services. + * + * @returns fully-qualified service names (eg. 'sample.SampleService') + */ + listServices(listServices: string): ListServiceResponse__Output { + return { service: Object.keys(this.services).map((service) => ({ name: service })) }; + } + + /** Find the proto file(s) that declare the given fully-qualified symbol name + * + * @param symbol fully-qualified name of the symbol to lookup + * (e.g. package.service[.method] or package.type) + * + * @returns descriptors of the file which contains this symbol and its imports + */ + fileContainingSymbol(symbol: string): FileDescriptorResponse__Output { + const file = this.symbols[symbol]; + + if (!file) { + throw new ReflectionError(grpc.status.NOT_FOUND, `Symbol not found: ${symbol}`); + } + + const deps = this.getFileDependencies(file); + + return { + fileDescriptorProto: [file, ...deps].map((file) => this.fileEncodings.get(file) || new Uint8Array()) + }; + } + + /** Find a proto file by the file name + * + * @returns descriptors of the file which contains this symbol and its imports + */ + fileByFilename(filename: string): FileDescriptorResponse__Output { + const file = this.files[filename]; + + if (!file) { + throw new ReflectionError(grpc.status.NOT_FOUND, `Proto file not found: ${filename}`); + } + + const deps = this.getFileDependencies(file); + + return { + fileDescriptorProto: [file, ...deps].map((file) => this.fileEncodings.get(file) || new Uint8Array()), + }; + } + + /** Find a proto file containing an extension to a message type + * + * @returns descriptors of the file which contains this symbol and its imports + */ + fileContainingExtension(symbol: string, field: number): FileDescriptorResponse__Output { + const extensionsByFieldNumber = this.extensions[symbol] || {}; + const file = extensionsByFieldNumber[field]; + + if (!file) { + throw new ReflectionError( + grpc.status.NOT_FOUND, + `Extension not found for symbol ${symbol} at field ${field}`, + ); + } + + const deps = this.getFileDependencies(file); + + return { + fileDescriptorProto: [file, ...deps].map((file) => this.fileEncodings.get(file) || new Uint8Array()), + }; + } + + allExtensionNumbersOfType(symbol: string): ExtensionNumberResponse__Output { + if (!(symbol in this.extensions)) { + throw new ReflectionError(grpc.status.NOT_FOUND, `Extensions not found for symbol ${symbol}`); + } + + const fieldNumbers = Object.keys(this.extensions[symbol]).map((key) => Number(key)); + + return { + baseTypeName: symbol, + extensionNumber: fieldNumbers, + }; + } + + private getFileDependencies(file: IFileDescriptorProto): IFileDescriptorProto[] { + const visited: Set<IFileDescriptorProto> = new Set(); + const toVisit: IFileDescriptorProto[] = [...(this.fileDependencies.get(file) || [])]; + + while (toVisit.length > 0) { + const current = toVisit.pop(); + + if (!current || visited.has(current)) { + continue; + } + + visited.add(current); + toVisit.push(...this.fileDependencies.get(current)?.filter((dep) => !visited.has(dep)) || []); + } + + return Array.from(visited); + } + +} diff --git a/packages/grpc-reflection/src/implementations/reflection-v1alpha.ts b/packages/grpc-reflection/src/implementations/reflection-v1alpha.ts new file mode 100644 index 000000000..f3fdcafe0 --- /dev/null +++ 
b/packages/grpc-reflection/src/implementations/reflection-v1alpha.ts @@ -0,0 +1,42 @@ +import * as path from 'path'; + +import * as grpc from '@grpc/grpc-js'; +import * as protoLoader from '@grpc/proto-loader'; + +import { ServerReflectionRequest } from '../generated/grpc/reflection/v1/ServerReflectionRequest'; +import { ServerReflectionResponse } from '../generated/grpc/reflection/v1/ServerReflectionResponse'; +import { PROTO_LOADER_OPTS } from './common/constants'; +import { ReflectionV1Implementation } from './reflection-v1'; + + +/** Analyzes a gRPC server and exposes methods to reflect on it + * + * NOTE: the files returned by this service may not match the handwritten ones 1:1. + * This is because proto-loader reorients files based on their package definition, + * combining any that have the same package. + * + * For example: if files 'a.proto' and 'b.proto' are both for the same package 'c' then + * we will always return a reference to a combined 'c.proto' instead of the 2 files. + * + * @privateRemarks as the v1 and v1alpha specs are identical, this implementation extends + * reflection-v1 and exposes it at the v1alpha package instead + */ +export class ReflectionV1AlphaImplementation extends ReflectionV1Implementation { + addToServer(server: Pick<grpc.Server, 'addService'>) { + const protoPath = path.join(__dirname, '../../proto/grpc/reflection/v1alpha/reflection.proto'); + const pkgDefinition = protoLoader.loadSync(protoPath, PROTO_LOADER_OPTS); + const pkg = grpc.loadPackageDefinition(pkgDefinition) as any; + + server.addService(pkg.grpc.reflection.v1alpha.ServerReflection.service, { + ServerReflectionInfo: ( + stream: grpc.ServerDuplexStream<ServerReflectionRequest, ServerReflectionResponse> + ) => { + stream.on('end', () => stream.end()); + + stream.on('data', (message: ServerReflectionRequest) => { + stream.write(this.handleServerReflectionRequest(message)); + }); + } + }); + } +} diff --git a/packages/grpc-reflection/src/index.ts b/packages/grpc-reflection/src/index.ts new file mode 100644 index 000000000..deba7fe33 --- /dev/null +++ b/packages/grpc-reflection/src/index.ts @@ -0,0 +1 @@ +export { ReflectionService } from './service'; diff --git a/packages/grpc-reflection/src/service.ts b/packages/grpc-reflection/src/service.ts new file mode 100644 index 000000000..616d210de --- /dev/null +++ b/packages/grpc-reflection/src/service.ts @@ -0,0 +1,38 @@ +import * as grpc from '@grpc/grpc-js'; +import * as protoLoader from '@grpc/proto-loader'; + +import { ReflectionV1Implementation } from './implementations/reflection-v1'; +import { ReflectionV1AlphaImplementation } from './implementations/reflection-v1alpha'; +import { ReflectionServerOptions } from './implementations/common/interfaces'; + +/** Analyzes a gRPC package and exposes endpoints providing information about + * it according to the gRPC Server Reflection API Specification + * + * @see https://github.com/grpc/grpc/blob/master/doc/server-reflection.md + * + * @remarks + * + * in order to keep backwards compatibility as the reflection schema evolves + * this service contains implementations for each of the published versions + * + * @privateRemarks + * + * this class acts mostly as a facade to several underlying implementations. 
This + * allows us to add or remove support for different versions of the reflection + * schema without affecting the consumer + * + */ +export class ReflectionService { + private readonly v1: ReflectionV1Implementation; + private readonly v1Alpha: ReflectionV1AlphaImplementation; + + constructor(pkg: protoLoader.PackageDefinition, options?: ReflectionServerOptions) { + this.v1 = new ReflectionV1Implementation(pkg, options); + this.v1Alpha = new ReflectionV1AlphaImplementation(pkg, options); + } + + addToServer(server: Pick<grpc.Server, 'addService'>) { + this.v1.addToServer(server); + this.v1Alpha.addToServer(server); + } +} diff --git a/packages/grpc-reflection/test/test-reflection-v1-implementation.ts b/packages/grpc-reflection/test/test-reflection-v1-implementation.ts new file mode 100644 index 000000000..552160f74 --- /dev/null +++ b/packages/grpc-reflection/test/test-reflection-v1-implementation.ts @@ -0,0 +1,192 @@ +import * as assert from 'assert'; +import * as path from 'path'; +import { FileDescriptorProto, IFileDescriptorProto } from 'protobufjs/ext/descriptor'; +import * as protoLoader from '@grpc/proto-loader'; + +import { ReflectionV1Implementation } from '../src/implementations/reflection-v1'; + +describe('GrpcReflectionService', () => { + let reflectionService: ReflectionV1Implementation; + + beforeEach(async () => { + const root = protoLoader.loadSync(path.join(__dirname, '../proto/sample/sample.proto'), { + includeDirs: [path.join(__dirname, '../proto/sample/vendor')] + }); + + reflectionService = new ReflectionV1Implementation(root); + }); + + describe('listServices()', () => { + it('lists all services', () => { + const { service: services } = reflectionService.listServices('*'); + assert.equal(services.length, 2); + assert(services.find((s) => s.name === 'sample.SampleService')); + }); + + it('whitelists services properly', () => { + const root = protoLoader.loadSync(path.join(__dirname, '../proto/sample/sample.proto'), { + includeDirs: [path.join(__dirname, '../proto/sample/vendor')] + }); + + reflectionService = new ReflectionV1Implementation(root, { services: ['sample.SampleService'] }); + + const { service: services } = reflectionService.listServices('*'); + assert.equal(services.length, 1); + assert(services.find((s) => s.name === 'sample.SampleService')); + }); + }); + + describe('fileByFilename()', () => { + it('finds files with transitive dependencies', () => { + const descriptors = reflectionService + .fileByFilename('sample.proto') + .fileDescriptorProto.map(f => FileDescriptorProto.decode(f) as IFileDescriptorProto); + + const names = descriptors.map((desc) => desc.name); + assert.deepEqual( + new Set(names), + new Set(['sample.proto', 'vendor.proto', 'vendor_dependency.proto']) + ); + }); + + it('finds files with fewer transitive dependencies', () => { + const descriptors = reflectionService + .fileByFilename('vendor.proto') + .fileDescriptorProto.map(f => FileDescriptorProto.decode(f) as IFileDescriptorProto); + + const names = descriptors.map((desc) => desc.name); + assert.deepEqual(new Set(names), new Set(['vendor.proto', 'vendor_dependency.proto'])); + }); + + it('finds files with no transitive dependencies', () => { + const descriptors = reflectionService + .fileByFilename('vendor_dependency.proto') + .fileDescriptorProto.map(f => FileDescriptorProto.decode(f) as IFileDescriptorProto); + + assert.equal(descriptors.length, 1); + assert.equal(descriptors[0].name, 'vendor_dependency.proto'); + }); + + it('merges files based on package name', () => { + const descriptors = 
reflectionService + .fileByFilename('vendor.proto') + .fileDescriptorProto.map(f => FileDescriptorProto.decode(f) as IFileDescriptorProto); + + const names = descriptors.map((desc) => desc.name); + assert(!names.includes('common.proto')); // file merged into vendor.proto + }); + + it('errors with no file found', () => { + assert.throws( + () => reflectionService.fileByFilename('nonexistent.proto'), + 'Proto file not found', + ); + }); + }); + + describe('fileContainingSymbol()', () => { + it('finds symbols and returns transitive file dependencies', () => { + const descriptors = reflectionService + .fileContainingSymbol('sample.HelloRequest') + .fileDescriptorProto.map(f => FileDescriptorProto.decode(f) as IFileDescriptorProto); + + const names = descriptors.map((desc) => desc.name); + assert.deepEqual( + new Set(names), + new Set(['sample.proto', 'vendor.proto', 'vendor_dependency.proto']), + ); + }); + + it('finds imported message types', () => { + const descriptors = reflectionService + .fileContainingSymbol('vendor.CommonMessage') + .fileDescriptorProto.map(f => FileDescriptorProto.decode(f) as IFileDescriptorProto); + + const names = descriptors.map((desc) => desc.name); + assert.deepEqual(new Set(names), new Set(['vendor.proto', 'vendor_dependency.proto'])); + }); + + it('finds transitively imported message types', () => { + const descriptors = reflectionService + .fileContainingSymbol('vendor.dependency.DependentMessage') + .fileDescriptorProto.map(f => FileDescriptorProto.decode(f) as IFileDescriptorProto); + + assert.equal(descriptors.length, 1); + assert.equal(descriptors[0].name, 'vendor_dependency.proto'); + }); + + it('finds nested message types', () => { + const descriptors = reflectionService + .fileContainingSymbol('sample.HelloRequest.HelloNested') + .fileDescriptorProto.map(f => FileDescriptorProto.decode(f) as IFileDescriptorProto); + + const names = descriptors.map((desc) => desc.name); + assert.deepEqual( + new Set(names), + new Set(['sample.proto', 'vendor.proto', 'vendor_dependency.proto']), + ); + }); + + it('merges files based on package name', () => { + const descriptors = reflectionService + .fileContainingSymbol('vendor.CommonMessage') + .fileDescriptorProto.map(f => FileDescriptorProto.decode(f) as IFileDescriptorProto); + + const names = descriptors.map((desc) => desc.name); + assert(!names.includes('common.proto')); // file merged into vendor.proto + }); + + it('errors with no symbol found', () => { + assert.throws( + () => reflectionService.fileContainingSymbol('non.existant.symbol'), + 'Symbol not found:', + ); + }); + + it('resolves references to method types', () => { + const descriptors = reflectionService + .fileContainingSymbol('sample.SampleService.Hello2') + .fileDescriptorProto.map(f => FileDescriptorProto.decode(f) as IFileDescriptorProto); + + const names = descriptors.map((desc) => desc.name); + assert.deepEqual( + new Set(names), + new Set(['sample.proto', 'vendor.proto', 'vendor_dependency.proto']), + ); + }); + }); + + describe('fileContainingExtension()', () => { + it('finds extensions and returns transitive file dependencies', () => { + const descriptors = reflectionService + .fileContainingExtension('.vendor.CommonMessage', 101) + .fileDescriptorProto.map(f => FileDescriptorProto.decode(f) as IFileDescriptorProto); + + const names = descriptors.map((desc) => desc.name); + assert.deepEqual(new Set(names), new Set(['vendor.proto', 'vendor_dependency.proto'])); + }); + + it('errors with no symbol found', () => { + assert.throws( + () => 
reflectionService.fileContainingExtension('non.existant.symbol', 0), + 'Extension not found', + ); + }); + }); + + describe('allExtensionNumbersOfType()', () => { + it('finds extensions and returns transitive file dependencies', () => { + const response = reflectionService.allExtensionNumbersOfType('.vendor.CommonMessage'); + + assert.equal(response.extensionNumber.length, 1); + assert.equal(response.extensionNumber[0], 101); + }); + + it('errors with no symbol found', () => { + assert.throws( + () => reflectionService.allExtensionNumbersOfType('non.existant.symbol'), + 'Extensions not found', + ); + }); + }); +}); diff --git a/packages/grpc-reflection/test/test-utils.ts b/packages/grpc-reflection/test/test-utils.ts new file mode 100644 index 000000000..2f197652a --- /dev/null +++ b/packages/grpc-reflection/test/test-utils.ts @@ -0,0 +1,14 @@ +import * as assert from 'assert'; + +import { scope } from '../src/implementations/common/utils'; + +describe('scope', () => { + it('traverses upwards in the package scope', () => { + assert.strictEqual(scope('grpc.health.v1.HealthCheckResponse.ServiceStatus'), 'grpc.health.v1.HealthCheckResponse'); + assert.strictEqual(scope(scope(scope(scope('grpc.health.v1.HealthCheckResponse.ServiceStatus')))), 'grpc'); + }); + it('returns an empty package when at the top', () => { + assert.strictEqual(scope('Message'), ''); + assert.strictEqual(scope(''), ''); + }); +}); diff --git a/packages/grpc-reflection/tsconfig.json b/packages/grpc-reflection/tsconfig.json new file mode 100644 index 000000000..763ceda98 --- /dev/null +++ b/packages/grpc-reflection/tsconfig.json @@ -0,0 +1,29 @@ +{ + "compilerOptions": { + "allowUnreachableCode": false, + "allowUnusedLabels": false, + "declaration": true, + "forceConsistentCasingInFileNames": true, + "noEmitOnError": true, + "noFallthroughCasesInSwitch": true, + "noImplicitReturns": true, + "pretty": true, + "sourceMap": true, + "strict": true, + "lib": ["es2017"], + "outDir": "build", + "target": "es2017", + "module": "commonjs", + "resolveJsonModule": true, + "incremental": true, + "types": ["mocha"], + "noUnusedLocals": true + }, + "include": [ + "src/**/*.ts", + "test/**/*.ts" + ], + "exclude": [ + "node_modules" + ] +} diff --git a/packages/grpc-tools/.gitignore b/packages/grpc-tools/.gitignore new file mode 100644 index 000000000..5be16bd8a --- /dev/null +++ b/packages/grpc-tools/.gitignore @@ -0,0 +1,7 @@ +CMakeFiles/ +cmake_install.cmake +CMakeCache.txt +Makefile +grpc_node_plugin +protoc +deps/protobuf diff --git a/packages/grpc-tools/CMakeLists.txt b/packages/grpc-tools/CMakeLists.txt index 9d6690ba7..e13d9a28d 100644 --- a/packages/grpc-tools/CMakeLists.txt +++ b/packages/grpc-tools/CMakeLists.txt @@ -1,37 +1,60 @@ -cmake_minimum_required(VERSION 3.6) -if(COMMAND cmake_policy) - cmake_policy(SET CMP0003 NEW) -endif(COMMAND cmake_policy) +CMAKE_MINIMUM_REQUIRED(VERSION 3.7) +PROJECT("grpc-tools") -set(CMAKE_CXX_STANDARD 11) -set(CMAKE_CXX_STANDARD_REQUIRED ON) -set(CMAKE_CXX_EXTENSIONS OFF) +SET(CMAKE_OSX_DEPLOYMENT_TARGET "11.7" CACHE STRING "Minimum OS X deployment version" FORCE) -set(PROTOBUF_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/deps/protobuf) -add_subdirectory(${PROTOBUF_ROOT_DIR}/cmake deps/protobuf) +IF(COMMAND CMAKE_POLICY) + CMAKE_POLICY(SET CMP0003 NEW) +ENDIF(COMMAND CMAKE_POLICY) -set(protobuf_BUILD_TESTS OFF CACHE BOOL "Build protobuf tests") -set(protobuf_WITH_ZLIB OFF CACHE BOOL "Build protobuf with zlib.") +# MSVC runtime library flags are selected by an abstraction. 
+IF(COMMAND CMAKE_POLICY AND POLICY CMP0091) + CMAKE_POLICY(SET CMP0091 NEW) +ENDIF() -set(CMAKE_EXE_LINKER_FLAGS "-static-libstdc++") -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-stack-protector") +SET(CMAKE_CXX_STANDARD 11) +SET(CMAKE_CXX_STANDARD_REQUIRED ON) +SET(CMAKE_CXX_EXTENSIONS OFF) -add_executable(grpc_node_plugin - src/node_generator.cc - src/node_plugin.cc -) +SET(protobuf_BUILD_TESTS OFF CACHE BOOL "Build protobuf tests") +SET(protobuf_WITH_ZLIB OFF CACHE BOOL "Build protobuf with zlib.") +SET(PROTOBUF_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/deps/protobuf) + +ADD_SUBDIRECTORY(${PROTOBUF_ROOT_DIR}/cmake deps/protobuf) -if (MSVC) - add_definitions(/MTd) -endif (MSVC) +SET(CMAKE_EXE_LINKER_FLAGS "-static-libstdc++") +SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-stack-protector") -target_include_directories(grpc_node_plugin +ADD_EXECUTABLE(grpc_node_plugin + src/node_generator.cc + src/node_plugin.cc +) + +IF(MSVC) + IF(CMAKE_VERSION VERSION_GREATER_EQUAL 3.15) + SET(CMAKE_MSVC_RUNTIME_LIBRARY MultiThreaded$<$<CONFIG:Debug>:Debug>) + ELSE() + FOREACH(flag_var + CMAKE_CXX_FLAGS + CMAKE_CXX_FLAGS_DEBUG + CMAKE_CXX_FLAGS_RELEASE + CMAKE_CXX_FLAGS_MINSIZEREL + CMAKE_CXX_FLAGS_RELWITHDEBINFO + ) + IF(${flag_var} MATCHES "/MD") + STRING(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}") + ENDIF(${flag_var} MATCHES "/MD") + ENDFOREACH(flag_var) + ENDIF() +ENDIF(MSVC) + +TARGET_INCLUDE_DIRECTORIES(grpc_node_plugin PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src PRIVATE ${PROTOBUF_ROOT_DIR}/include ) -target_link_libraries(grpc_node_plugin +TARGET_LINK_LIBRARIES(grpc_node_plugin libprotoc libprotobuf -) \ No newline at end of file +) diff --git a/packages/grpc-tools/README.md b/packages/grpc-tools/README.md index eae786929..980ce93eb 100644 --- a/packages/grpc-tools/README.md +++ b/packages/grpc-tools/README.md @@ -9,7 +9,7 @@ libraries. This library exports the `grpc_tools_node_protoc` executable, which accepts all of the same arguments as `protoc` itself. For use with Node, you most likely want to use CommonJS-style imports. An example of generating code this way can -be found in [this guide](https://developers.google.com/protocol-buffers/docs/reference/javascript-generated#commonjs-imports). +be found in [this guide](https://github.com/protocolbuffers/protobuf-javascript/blob/main/docs/index.md). The `grpc_tools_node_protoc` automatically includes the Node gRPC plugin, so it also accepts the `--grpc_out=[option:]path` argument. The option can be one of the following: @@ -19,4 +19,4 @@ one of the following: - `generate_package_definition`: Generates code that does not `require` any gRPC library, and instead generates `PackageDefinition` objects that can be passed to the `loadPackageDefinition` function provided by both the - `grpc` and `@grpc/grpc-js` libraries. \ No newline at end of file + `grpc` and `@grpc/grpc-js` libraries. 
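For reference, a minimal invocation of the `grpc_tools_node_protoc` command documented in that README might look like the following sketch; the include path, output directory, and `example.proto` file are placeholders rather than part of this change:

    # Generate CommonJS message code plus @grpc/grpc-js service stubs (hypothetical paths)
    grpc_tools_node_protoc \
      --js_out=import_style=commonjs,binary:./generated \
      --grpc_out=grpc_js:./generated \
      -I ./protos \
      ./protos/example.proto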
diff --git a/packages/grpc-tools/build_binaries.sh b/packages/grpc-tools/build_binaries.sh index b26946afa..e98743ba1 100755 --- a/packages/grpc-tools/build_binaries.sh +++ b/packages/grpc-tools/build_binaries.sh @@ -17,48 +17,68 @@ set -e uname -a -cd $(dirname $0) +cd "$(dirname "$0")" base=$(pwd) protobuf_base=$base/deps/protobuf -tools_version=$(jq '.version' < package.json | tr -d '"') +tools_version=$(jq '.version' { + if (outputTemplate === inputTemplate) { + throw new Error('inputTemplate and outputTemplate must differ') + } + return { + outputName: (n: string) => outputTemplate.replace(templateStr, n), + inputName: (n: string) => inputTemplate.replace(templateStr, n) + }; +} + type GeneratorOptions = Protobuf.IParseOptions & Protobuf.IConversionOptions & { includeDirs?: string[]; - grpcLib: string; + grpcLib?: string; outDir: string; verbose?: boolean; includeComments?: boolean; + inputTemplate: string; + outputTemplate: string; + inputBranded: boolean; + outputBranded: boolean; } class TextFormatter { @@ -114,27 +129,24 @@ function getTypeInterfaceName(type: Protobuf.Type | Protobuf.Enum | Protobuf.Ser return type.fullName.replace(/\./g, '_'); } -function getImportLine(dependency: Protobuf.Type | Protobuf.Enum | Protobuf.Service, from?: Protobuf.Type | Protobuf.Service) { +function getImportLine(dependency: Protobuf.Type | Protobuf.Enum | Protobuf.Service, from: Protobuf.Type | Protobuf.Service | undefined, options: GeneratorOptions) { const filePath = from === undefined ? './' + getImportPath(dependency) : getRelativeImportPath(from, dependency); + const {outputName, inputName} = useNameFmter(options); const typeInterfaceName = getTypeInterfaceName(dependency); let importedTypes: string; /* If the dependency is defined within a message, it will be generated in that * message's file and exported using its typeInterfaceName. */ if (dependency.parent instanceof Protobuf.Type) { - if (dependency instanceof Protobuf.Type) { - importedTypes = `${typeInterfaceName}, ${typeInterfaceName}__Output`; - } else if (dependency instanceof Protobuf.Enum) { - importedTypes = `${typeInterfaceName}`; + if (dependency instanceof Protobuf.Type || dependency instanceof Protobuf.Enum) { + importedTypes = `${inputName(typeInterfaceName)}, ${outputName(typeInterfaceName)}`; } else if (dependency instanceof Protobuf.Service) { importedTypes = `${typeInterfaceName}Client, ${typeInterfaceName}Definition`; } else { throw new Error('Invalid object passed to getImportLine'); } } else { - if (dependency instanceof Protobuf.Type) { - importedTypes = `${dependency.name} as ${typeInterfaceName}, ${dependency.name}__Output as ${typeInterfaceName}__Output`; - } else if (dependency instanceof Protobuf.Enum) { - importedTypes = `${dependency.name} as ${typeInterfaceName}`; + if (dependency instanceof Protobuf.Type || dependency instanceof Protobuf.Enum) { + importedTypes = `${inputName(dependency.name)} as ${inputName(typeInterfaceName)}, ${outputName(dependency.name)} as ${outputName(typeInterfaceName)}`; } else if (dependency instanceof Protobuf.Service) { importedTypes = `${dependency.name}Client as ${typeInterfaceName}Client, ${dependency.name}Definition as ${typeInterfaceName}Definition`; } else { @@ -157,20 +169,34 @@ function getChildMessagesAndEnums(namespace: Protobuf.NamespaceBase): (Protobuf. 
return messageList; } -function formatComment(formatter: TextFormatter, comment?: string | null) { - if (!comment) { +function formatComment(formatter: TextFormatter, comment?: string | null, options?: Protobuf.ReflectionObject['options']) { + if (!comment && !options?.deprecated) { return; } formatter.writeLine('/**'); - for(const line of comment.split('\n')) { - formatter.writeLine(` * ${line.replace(/\*\//g, '* /')}`); + if (comment) { + for(const line of comment.split('\n')) { + formatter.writeLine(` * ${line.replace(/\*\//g, '* /')}`); + } + } + if (options?.deprecated) { + formatter.writeLine(' * @deprecated'); } formatter.writeLine(' */'); } +const typeBrandHint = `This field is a type brand and is not populated at runtime. Instances of this type should be created using type assertions. +https://github.com/grpc/grpc-node/pull/2281`; + +function formatTypeBrand(formatter: TextFormatter, messageType: Protobuf.Type) { + formatComment(formatter, typeBrandHint); + formatter.writeLine(`__type: '${messageType.fullName}'`); +} + // GENERATOR FUNCTIONS -function getTypeNamePermissive(fieldType: string, resolvedType: Protobuf.Type | Protobuf.Enum | null, repeated: boolean, map: boolean): string { +function getTypeNamePermissive(fieldType: string, resolvedType: Protobuf.Type | Protobuf.Enum | null, repeated: boolean, map: boolean, options: GeneratorOptions): string { + const {inputName} = useNameFmter(options); switch (fieldType) { case 'double': case 'float': @@ -200,18 +226,19 @@ function getTypeNamePermissive(fieldType: string, resolvedType: Protobuf.Type | const typeInterfaceName = getTypeInterfaceName(resolvedType); if (resolvedType instanceof Protobuf.Type) { if (repeated || map) { - return typeInterfaceName; + return inputName(typeInterfaceName); } else { - return `${typeInterfaceName} | null`; + return `${inputName(typeInterfaceName)} | null`; } } else { - return `${typeInterfaceName} | keyof typeof ${typeInterfaceName}`; + // Enum + return inputName(typeInterfaceName); } } } -function getFieldTypePermissive(field: Protobuf.FieldBase): string { - const valueType = getTypeNamePermissive(field.type, field.resolvedType, field.repeated, field.map); +function getFieldTypePermissive(field: Protobuf.FieldBase, options: GeneratorOptions): string { + const valueType = getTypeNamePermissive(field.type, field.resolvedType, field.repeated, field.map, options); if (field instanceof Protobuf.MapField) { const keyType = field.keyType === 'string' ? 'string' : 'number'; return `{[key: ${keyType}]: ${valueType}}`; @@ -221,40 +248,45 @@ function getFieldTypePermissive(field: Protobuf.FieldBase): string { } function generatePermissiveMessageInterface(formatter: TextFormatter, messageType: Protobuf.Type, options: GeneratorOptions, nameOverride?: string) { + const {inputName} = useNameFmter(options); if (options.includeComments) { - formatComment(formatter, messageType.comment); + formatComment(formatter, messageType.comment, messageType.options); } if (messageType.fullName === '.google.protobuf.Any') { /* This describes the behavior of the Protobuf.js Any wrapper fromObject * replacement function */ - formatter.writeLine('export type Any = AnyExtension | {'); + formatter.writeLine(`export type ${inputName('Any')} = AnyExtension | {`); formatter.writeLine(' type_url: string;'); formatter.writeLine(' value: Buffer | Uint8Array | string;'); formatter.writeLine('}'); return; } - formatter.writeLine(`export interface ${nameOverride ?? 
messageType.name} {`); + formatter.writeLine(`export interface ${inputName(nameOverride ?? messageType.name)} {`); formatter.indent(); for (const field of messageType.fieldsArray) { const repeatedString = field.repeated ? '[]' : ''; - const type: string = getFieldTypePermissive(field); + const type: string = getFieldTypePermissive(field, options); if (options.includeComments) { - formatComment(formatter, field.comment); + formatComment(formatter, field.comment, field.options); } formatter.writeLine(`'${field.name}'?: (${type})${repeatedString};`); } for (const oneof of messageType.oneofsArray) { const typeString = oneof.fieldsArray.map(field => `"${field.name}"`).join('|'); if (options.includeComments) { - formatComment(formatter, oneof.comment); + formatComment(formatter, oneof.comment, oneof.options); } formatter.writeLine(`'${oneof.name}'?: ${typeString};`); } + if (options.inputBranded) { + formatTypeBrand(formatter, messageType); + } formatter.unindent(); formatter.writeLine('}'); } function getTypeNameRestricted(fieldType: string, resolvedType: Protobuf.Type | Protobuf.Enum | null, repeated: boolean, map: boolean, options: GeneratorOptions): string { + const {outputName} = useNameFmter(options); switch (fieldType) { case 'double': case 'float': @@ -302,16 +334,13 @@ function getTypeNameRestricted(fieldType: string, resolvedType: Protobuf.Type | /* null is only used to represent absent message values if the defaults * option is set, and only for non-repeated, non-map fields. */ if (options.defaults && !repeated && !map) { - return `${typeInterfaceName}__Output | null`; + return `${outputName(typeInterfaceName)} | null`; } else { - return `${typeInterfaceName}__Output`; + return `${outputName(typeInterfaceName)}`; } } else { - if (options.enums == String) { - return `keyof typeof ${typeInterfaceName}`; - } else { - return typeInterfaceName; - } + // Enum + return outputName(typeInterfaceName); } } } @@ -327,20 +356,21 @@ function getFieldTypeRestricted(field: Protobuf.FieldBase, options: GeneratorOpt } function generateRestrictedMessageInterface(formatter: TextFormatter, messageType: Protobuf.Type, options: GeneratorOptions, nameOverride?: string) { + const {outputName} = useNameFmter(options); if (options.includeComments) { - formatComment(formatter, messageType.comment); + formatComment(formatter, messageType.comment, messageType.options); } if (messageType.fullName === '.google.protobuf.Any' && options.json) { /* This describes the behavior of the Protobuf.js Any wrapper toObject * replacement function */ let optionalString = options.defaults ? '' : '?'; - formatter.writeLine('export type Any__Output = AnyExtension | {'); + formatter.writeLine(`export type ${outputName('Any')} = AnyExtension | {`); formatter.writeLine(` type_url${optionalString}: string;`); formatter.writeLine(` value${optionalString}: ${getTypeNameRestricted('bytes', null, false, false, options)};`); formatter.writeLine('}'); return; } - formatter.writeLine(`export interface ${nameOverride ?? messageType.name}__Output {`); + formatter.writeLine(`export interface ${outputName(nameOverride ?? messageType.name)} {`); formatter.indent(); for (const field of messageType.fieldsArray) { let fieldGuaranteed: boolean; @@ -358,7 +388,7 @@ function generateRestrictedMessageInterface(formatter: TextFormatter, messageTyp const repeatedString = field.repeated ? 
'[]' : ''; const type = getFieldTypeRestricted(field, options); if (options.includeComments) { - formatComment(formatter, field.comment); + formatComment(formatter, field.comment, field.options); } formatter.writeLine(`'${field.name}'${optionalString}: (${type})${repeatedString};`); } @@ -366,11 +396,14 @@ function generateRestrictedMessageInterfac for (const oneof of messageType.oneofsArray) { const typeString = oneof.fieldsArray.map(field => `"${field.name}"`).join('|'); if (options.includeComments) { - formatComment(formatter, oneof.comment); + formatComment(formatter, oneof.comment, oneof.options); } formatter.writeLine(`'${oneof.name}': ${typeString};`); } } + if (options.outputBranded) { + formatTypeBrand(formatter, messageType); + } formatter.unindent(); formatter.writeLine('}'); } @@ -379,8 +412,10 @@ function generateMessageInterfaces(formatter: TextFormatter, messageType: Protob let usesLong: boolean = false; let seenDeps: Set<string> = new Set<string>(); const childTypes = getChildMessagesAndEnums(messageType); - formatter.writeLine(`// Original file: ${messageType.filename}`); + formatter.writeLine(`// Original file: ${(messageType.filename ?? 'null')?.replace(/\\/g, '/')}`); formatter.writeLine(''); + const isLongField = (field: Protobuf.Field) => + ['int64', 'uint64', 'sint64', 'fixed64', 'sfixed64'].includes(field.type); messageType.fieldsArray.sort((fieldA, fieldB) => fieldA.id - fieldB.id); for (const field of messageType.fieldsArray) { if (field.resolvedType && childTypes.indexOf(field.resolvedType) < 0) { @@ -389,9 +424,9 @@ function generateMessageInterfaces(formatter: TextFormatter, messageType: Protob continue; } seenDeps.add(dependency.fullName); - formatter.writeLine(getImportLine(dependency, messageType)); + formatter.writeLine(getImportLine(dependency, messageType, options)); } - if (field.type.indexOf('64') >= 0) { + if (isLongField(field)) { usesLong = true; } } @@ -404,9 +439,9 @@ function generateMessageInterfaces(formatter: TextFormatter, messageType: Protob continue; } seenDeps.add(dependency.fullName); - formatter.writeLine(getImportLine(dependency, messageType)); + formatter.writeLine(getImportLine(dependency, messageType, options)); } - if (field.type.indexOf('64') >= 0) { + if (isLongField(field)) { usesLong = true; } } @@ -437,38 +472,99 @@ } function generateEnumInterface(formatter: TextFormatter, enumType: Protobuf.Enum, options: GeneratorOptions, nameOverride?: string) { - formatter.writeLine(`// Original file: ${enumType.filename}`); + const {inputName, outputName} = useNameFmter(options); + const name = nameOverride ?? enumType.name; + formatter.writeLine(`// Original file: ${(enumType.filename ?? 'null')?.replace(/\\/g, '/')}`); + formatter.writeLine(''); + if (options.includeComments) { + formatComment(formatter, enumType.comment, enumType.options); + } + formatter.writeLine(`export const ${name} = {`); + formatter.indent(); + for (const key of Object.keys(enumType.values)) { + if (options.includeComments) { + formatComment(formatter, enumType.comments[key], (enumType.valuesOptions ?? {})[key]); + } + formatter.writeLine(`${key}: ${options.enums == String ? 
`'${key}'` : enumType.values[key]},`); + } + formatter.unindent(); + formatter.writeLine('} as const;'); + + // Permissive Type formatter.writeLine(''); if (options.includeComments) { - formatComment(formatter, enumType.comment); + formatComment(formatter, enumType.comment, enumType.options); } - formatter.writeLine(`export enum ${nameOverride ?? enumType.name} {`); + formatter.writeLine(`export type ${inputName(name)} =`) formatter.indent(); for (const key of Object.keys(enumType.values)) { if (options.includeComments) { formatComment(formatter, enumType.comments[key]); } - formatter.writeLine(`${key} = ${enumType.values[key]},`); + formatter.writeLine(`| '${key}'`); + formatter.writeLine(`| ${enumType.values[key]}`); } formatter.unindent(); - formatter.writeLine('}'); + + // Restrictive Type + formatter.writeLine(''); + if (options.includeComments) { + formatComment(formatter, enumType.comment, enumType.options); + } + formatter.writeLine(`export type ${outputName(name)} = typeof ${name}[keyof typeof ${name}]`) } +/** + * This is a list of methods that exist in the generic Client class in the + * gRPC libraries. TypeScript has a problem with methods in subclasses with the + * same names as methods in the superclass, but with mismatched APIs. So, we + * avoid generating methods with these names in the service client interfaces. + * We always generate two service client methods per service method: one camel + * cased, and one with the original casing. So we will still generate one + * service client method for any conflicting name. + * + * Technically, at runtime a conflicting name in the service client method + * actually shadows the original method, but TypeScript does not have a good + * way to represent that. So this change is not 100% accurate, but it gets the + * generated code to compile. + * + * This is just a list of the methods in the Client class definitions in + * grpc@1.24.11 and @grpc/grpc-js@1.4.0. + */ +const CLIENT_RESERVED_METHOD_NAMES = new Set([ + 'close', + 'getChannel', + 'waitForReady', + 'makeUnaryRequest', + 'makeClientStreamRequest', + 'makeServerStreamRequest', + 'makeBidiStreamRequest', + 'resolveCallInterceptors', + /* These methods are private, but TypeScript is not happy with overriding even + * private methods with mismatched APIs. */ + 'checkOptionalUnaryResponseArguments', + 'checkMetadataAndOptions' +]); + function generateServiceClientInterface(formatter: TextFormatter, serviceType: Protobuf.Service, options: GeneratorOptions) { + const {outputName, inputName} = useNameFmter(options); if (options.includeComments) { - formatComment(formatter, serviceType.comment); + formatComment(formatter, serviceType.comment, serviceType.options); } formatter.writeLine(`export interface ${serviceType.name}Client extends grpc.Client {`); formatter.indent(); for (const methodName of Object.keys(serviceType.methods).sort()) { const method = serviceType.methods[methodName]; for (const name of [methodName, camelCase(methodName)]) { + if (CLIENT_RESERVED_METHOD_NAMES.has(name)) { + continue; + } if (options.includeComments) { - formatComment(formatter, method.comment); + formatComment(formatter, method.comment, method.options); } - const requestType = getTypeInterfaceName(method.resolvedRequestType!); - const responseType = getTypeInterfaceName(method.resolvedResponseType!) 
+ '__Output'; - const callbackType = `(error?: grpc.ServiceError, result?: ${responseType}) => void`; + const requestType = inputName(getTypeInterfaceName(method.resolvedRequestType!)); + const responseType = outputName(getTypeInterfaceName(method.resolvedResponseType!)); + const callbackType = `grpc.requestCallback<${responseType}>`; if (method.requestStream) { if (method.responseStream) { // Bidi streaming @@ -506,18 +602,19 @@ function generateServiceClientInterface(formatter: TextFormatter, serviceType: P } function generateServiceHandlerInterface(formatter: TextFormatter, serviceType: Protobuf.Service, options: GeneratorOptions) { + const {inputName, outputName} = useNameFmter(options); if (options.includeComments) { - formatComment(formatter, serviceType.comment); + formatComment(formatter, serviceType.comment, serviceType.options); } formatter.writeLine(`export interface ${serviceType.name}Handlers extends grpc.UntypedServiceImplementation {`); formatter.indent(); for (const methodName of Object.keys(serviceType.methods).sort()) { const method = serviceType.methods[methodName]; if (options.includeComments) { - formatComment(formatter, method.comment); + formatComment(formatter, method.comment, serviceType.options); } - const requestType = getTypeInterfaceName(method.resolvedRequestType!) + '__Output'; - const responseType = getTypeInterfaceName(method.resolvedResponseType!); + const requestType = outputName(getTypeInterfaceName(method.resolvedRequestType!)); + const responseType = inputName(getTypeInterfaceName(method.resolvedResponseType!)); if (method.requestStream) { if (method.responseStream) { // Bidi streaming @@ -541,24 +638,31 @@ function generateServiceHandlerInterface(formatter: TextFormatter, serviceType: formatter.writeLine('}'); } -function generateServiceDefinitionInterface(formatter: TextFormatter, serviceType: Protobuf.Service) { - formatter.writeLine(`export interface ${serviceType.name}Definition {`); +function generateServiceDefinitionInterface(formatter: TextFormatter, serviceType: Protobuf.Service, options: GeneratorOptions) { + const {inputName, outputName} = useNameFmter(options); + if (options.grpcLib) { + formatter.writeLine(`export interface ${serviceType.name}Definition extends grpc.ServiceDefinition {`); + } else { + formatter.writeLine(`export interface ${serviceType.name}Definition {`); + } formatter.indent(); for (const methodName of Object.keys(serviceType.methods).sort()) { const method = serviceType.methods[methodName]; const requestType = getTypeInterfaceName(method.resolvedRequestType!); const responseType = getTypeInterfaceName(method.resolvedResponseType!); - formatter.writeLine(`${methodName}: MethodDefinition<${requestType}, ${responseType}, ${requestType}__Output, ${responseType}__Output>`); + formatter.writeLine(`${methodName}: MethodDefinition<${inputName(requestType)}, ${inputName(responseType)}, ${outputName(requestType)}, ${outputName(responseType)}>`); } formatter.unindent(); formatter.writeLine('}') } function generateServiceInterfaces(formatter: TextFormatter, serviceType: Protobuf.Service, options: GeneratorOptions) { - formatter.writeLine(`// Original file: ${serviceType.filename}`); + formatter.writeLine(`// Original file: ${(serviceType.filename ?? 'null')?.replace(/\\/g, '/')}`); formatter.writeLine(''); - const grpcImportPath = options.grpcLib.startsWith('.') ? 
getPathToRoot(serviceType) + options.grpcLib : options.grpcLib; - formatter.writeLine(`import type * as grpc from '${grpcImportPath}'`); + if (options.grpcLib) { + const grpcImportPath = options.grpcLib.startsWith('.') ? getPathToRoot(serviceType) + options.grpcLib : options.grpcLib; + formatter.writeLine(`import type * as grpc from '${grpcImportPath}'`); + } formatter.writeLine(`import type { MethodDefinition } from '@grpc/proto-loader'`) const dependencies: Set<Protobuf.Type> = new Set<Protobuf.Type>(); for (const method of serviceType.methodsArray) { @@ -566,23 +670,53 @@ function generateServiceInterfaces(formatter: TextFormatter, serviceType: Protob dependencies.add(method.resolvedResponseType!); } for (const dep of Array.from(dependencies.values()).sort(compareName)) { - formatter.writeLine(getImportLine(dep, serviceType)); + formatter.writeLine(getImportLine(dep, serviceType, options)); } formatter.writeLine(''); - generateServiceClientInterface(formatter, serviceType, options); - formatter.writeLine(''); + if (options.grpcLib) { + generateServiceClientInterface(formatter, serviceType, options); + formatter.writeLine(''); - generateServiceHandlerInterface(formatter, serviceType, options); - formatter.writeLine(''); + generateServiceHandlerInterface(formatter, serviceType, options); + formatter.writeLine(''); + } - generateServiceDefinitionInterface(formatter, serviceType); + generateServiceDefinitionInterface(formatter, serviceType, options); +} + +function containsDefinition(definitionType: typeof Protobuf.Type | typeof Protobuf.Enum, namespace: Protobuf.NamespaceBase): boolean { + for (const nested of namespace.nestedArray.sort(compareName)) { + if (nested instanceof definitionType) { + return true; + } else if (isNamespaceBase(nested) && !(nested instanceof Protobuf.Type) && !(nested instanceof Protobuf.Enum) && containsDefinition(definitionType, nested)) { + return true; + } + } + + return false; +} + +function generateDefinitionImports(formatter: TextFormatter, namespace: Protobuf.NamespaceBase, options: GeneratorOptions) { + const imports = []; + + if (containsDefinition(Protobuf.Enum, namespace)) { + imports.push('EnumTypeDefinition'); + } + + if (containsDefinition(Protobuf.Type, namespace)) { + imports.push('MessageTypeDefinition'); + } + + if (imports.length) { + formatter.writeLine(`import type { ${imports.join(', ')} } from '@grpc/proto-loader';`); + } } function generateServiceImports(formatter: TextFormatter, namespace: Protobuf.NamespaceBase, options: GeneratorOptions) { for (const nested of namespace.nestedArray.sort(compareName)) { if (nested instanceof Protobuf.Service) { - formatter.writeLine(getImportLine(nested)); + formatter.writeLine(getImportLine(nested, undefined, options)); } else if (isNamespaceBase(nested) && !(nested instanceof Protobuf.Type) && !(nested instanceof Protobuf.Enum)) { generateServiceImports(formatter, nested, options); } @@ -592,7 +726,7 @@ function generateSingleLoadedDefinitionType(formatter: TextFormatter, nested: Protobuf.ReflectionObject, options: GeneratorOptions) { if (nested instanceof Protobuf.Service) { if (options.includeComments) { - formatComment(formatter, nested.comment); + formatComment(formatter, nested.comment, nested.options); } const typeInterfaceName = getTypeInterfaceName(nested); formatter.writeLine(`${nested.name}: SubtypeConstructor<typeof grpc.Client, ${typeInterfaceName}Client> & { service: ${typeInterfaceName}Definition }`); @@ -616,8 +750,11 @@ function generateLoadedDefinitionTypes(formatter: TextFormatter, 
namespace: Prot } function generateRootFile(formatter: TextFormatter, root: Protobuf.Root, options: GeneratorOptions) { + if (!options.grpcLib) { + return; + } formatter.writeLine(`import type * as grpc from '${options.grpcLib}';`); - formatter.writeLine("import type { ServiceDefinition, EnumTypeDefinition, MessageTypeDefinition } from '@grpc/proto-loader';"); + generateDefinitionImports(formatter, root, options); formatter.writeLine(''); generateServiceImports(formatter, root, options); @@ -676,11 +813,13 @@ function writeFilesForRoot(root: Protobuf.Root, masterFileName: string, options: const filePromises: Promise<void>[] = []; const masterFileFormatter = new TextFormatter(); - generateRootFile(masterFileFormatter, root, options); - if (options.verbose) { - console.log(`Writing ${options.outDir}/${masterFileName}`); + if (options.grpcLib) { + generateRootFile(masterFileFormatter, root, options); + if (options.verbose) { + console.log(`Writing ${options.outDir}/${masterFileName}`); + } + filePromises.push(writeFile(`${options.outDir}/${masterFileName}`, masterFileFormatter.getFullText())); } - filePromises.push(writeFile(`${options.outDir}/${masterFileName}`, masterFileFormatter.getFullText())); filePromises.push(...generateFilesForNamespace(root, options)); @@ -704,26 +843,40 @@ async function writeAllFiles(protoFiles: string[], options: GeneratorOptions) { } } -function runScript() { - const argv = yargs +async function runScript() { + const boolDefaultFalseOption = { + boolean: true, + default: false, + }; + const argv = await yargs .parserConfiguration({ 'parse-positional-numbers': false }) - .string(['includeDirs', 'grpcLib']) - .normalize(['includeDirs', 'outDir']) - .array('includeDirs') - .boolean(['keepCase', 'defaults', 'arrays', 'objects', 'oneofs', 'json', 'verbose', 'includeComments']) - .string(['longs', 'enums', 'bytes']) - .default('keepCase', false) - .default('defaults', false) - .default('arrays', false) - .default('objects', false) - .default('oneofs', false) - .default('json', false) - .default('includeComments', false) - .default('longs', 'Long') - .default('enums', 'number') - .default('bytes', 'Buffer') + .option('keepCase', boolDefaultFalseOption) + .option('longs', { string: true, default: 'Long' }) + .option('enums', { string: true, default: 'number' }) + .option('bytes', { string: true, default: 'Buffer' }) + .option('defaults', boolDefaultFalseOption) + .option('arrays', boolDefaultFalseOption) + .option('objects', boolDefaultFalseOption) + .option('oneofs', boolDefaultFalseOption) + .option('json', boolDefaultFalseOption) + .boolean('verbose') + .option('includeComments', boolDefaultFalseOption) + .option('includeDirs', { + normalize: true, + array: true, + alias: 'I' + }) + .option('outDir', { + alias: 'O', + normalize: true, + }) + .option('grpcLib', { string: true }) + .option('inputTemplate', { string: true, default: `${templateStr}` }) + .option('outputTemplate', { string: true, default: `${templateStr}__Output` }) + .option('inputBranded', boolDefaultFalseOption) + .option('outputBranded', boolDefaultFalseOption) .coerce('longs', value => { switch (value) { case 'String': return String; @@ -742,9 +895,8 @@ case 'String': return String; default: return undefined; } - }).alias({ - includeDirs: 'I', - outDir: 'O', + }) + .alias({ verbose: 'v' }).describe({ keepCase: 'Preserve the case of field names', @@ -759,8 +911,12 @@ includeComments: 'Generate doc comments from comments in the original files', includeDirs: 
'Directories to search for included files', outDir: 'Directory in which to output files', - grpcLib: 'The gRPC implementation library that these types will be used with' - }).demandOption(['outDir', 'grpcLib']) + grpcLib: 'The gRPC implementation library that these types will be used with. If not provided, some types will not be generated', + inputTemplate: 'Template for mapping input or "permissive" type names', + outputTemplate: 'Template for mapping output or "restricted" type names', + inputBranded: 'Output property for branded type for "permissive" types with fullName of the Message as its value', + outputBranded: 'Output property for branded type for "restricted" types with fullName of the Message as its value', + }).demandOption(['outDir']) .demand(1) .usage('$0 [options] filenames...') .epilogue('WARNING: This tool is in alpha. The CLI and generated code are subject to change') diff --git a/packages/proto-loader/golden-generated/echo.ts b/packages/proto-loader/golden-generated/echo.ts index f257a40e4..600e2864c 100644 --- a/packages/proto-loader/golden-generated/echo.ts +++ b/packages/proto-loader/golden-generated/echo.ts @@ -1,5 +1,5 @@ import type * as grpc from '@grpc/grpc-js'; -import type { ServiceDefinition, EnumTypeDefinition, MessageTypeDefinition } from '@grpc/proto-loader'; +import type { EnumTypeDefinition, MessageTypeDefinition } from '@grpc/proto-loader'; import type { OperationsClient as _google_longrunning_OperationsClient, OperationsDefinition as _google_longrunning_OperationsDefinition } from './google/longrunning/Operations'; import type { EchoClient as _google_showcase_v1beta1_EchoClient, EchoDefinition as _google_showcase_v1beta1_EchoDefinition } from './google/showcase/v1beta1/Echo'; diff --git a/packages/proto-loader/golden-generated/google/api/CustomHttpPattern.ts b/packages/proto-loader/golden-generated/google/api/CustomHttpPattern.ts index 2b6490be6..2f6e20297 100644 --- a/packages/proto-loader/golden-generated/google/api/CustomHttpPattern.ts +++ b/packages/proto-loader/golden-generated/google/api/CustomHttpPattern.ts @@ -4,7 +4,7 @@ /** * A custom pattern is used for defining custom HTTP verb. */ -export interface CustomHttpPattern { +export interface ICustomHttpPattern { /** * The name of this custom HTTP verb. */ @@ -18,7 +18,7 @@ export interface CustomHttpPattern { /** * A custom pattern is used for defining custom HTTP verb. */ -export interface CustomHttpPattern__Output { +export interface OCustomHttpPattern { /** * The name of this custom HTTP verb. */ diff --git a/packages/proto-loader/golden-generated/google/api/FieldBehavior.ts b/packages/proto-loader/golden-generated/google/api/FieldBehavior.ts index 8ab676709..189d25be5 100644 --- a/packages/proto-loader/golden-generated/google/api/FieldBehavior.ts +++ b/packages/proto-loader/golden-generated/google/api/FieldBehavior.ts @@ -8,40 +8,101 @@ * * Note: This enum **may** receive new values in the future. */ -export enum FieldBehavior { +export const FieldBehavior = { /** * Conventional default for enums. Do not use this. */ - FIELD_BEHAVIOR_UNSPECIFIED = 0, + FIELD_BEHAVIOR_UNSPECIFIED: 'FIELD_BEHAVIOR_UNSPECIFIED', /** * Specifically denotes a field as optional. * While all fields in protocol buffers are optional, this may be specified * for emphasis if appropriate. */ - OPTIONAL = 1, + OPTIONAL: 'OPTIONAL', /** * Denotes a field as required. * This indicates that the field **must** be provided as part of the request, * and failure to do so will cause an error (usually `INVALID_ARGUMENT`). 
*/ - REQUIRED = 2, + REQUIRED: 'REQUIRED', /** * Denotes a field as output only. * This indicates that the field is provided in responses, but including the * field in a request does nothing (the server *must* ignore it and * *must not* throw an error as a result of the field's presence). */ - OUTPUT_ONLY = 3, + OUTPUT_ONLY: 'OUTPUT_ONLY', /** * Denotes a field as input only. * This indicates that the field is provided in requests, and the * corresponding field is not included in output. */ - INPUT_ONLY = 4, + INPUT_ONLY: 'INPUT_ONLY', /** * Denotes a field as immutable. * This indicates that the field may be set once in a request to create a * resource, but may not be changed thereafter. */ - IMMUTABLE = 5, -} + IMMUTABLE: 'IMMUTABLE', +} as const; + +/** + * An indicator of the behavior of a given field (for example, that a field + * is required in requests, or given as output but ignored as input). + * This **does not** change the behavior in protocol buffers itself; it only + * denotes the behavior and may affect how API tooling handles the field. + * + * Note: This enum **may** receive new values in the future. + */ +export type IFieldBehavior = + /** + * Conventional default for enums. Do not use this. + */ + | 'FIELD_BEHAVIOR_UNSPECIFIED' + | 0 + /** + * Specifically denotes a field as optional. + * While all fields in protocol buffers are optional, this may be specified + * for emphasis if appropriate. + */ + | 'OPTIONAL' + | 1 + /** + * Denotes a field as required. + * This indicates that the field **must** be provided as part of the request, + * and failure to do so will cause an error (usually `INVALID_ARGUMENT`). + */ + | 'REQUIRED' + | 2 + /** + * Denotes a field as output only. + * This indicates that the field is provided in responses, but including the + * field in a request does nothing (the server *must* ignore it and + * *must not* throw an error as a result of the field's presence). + */ + | 'OUTPUT_ONLY' + | 3 + /** + * Denotes a field as input only. + * This indicates that the field is provided in requests, and the + * corresponding field is not included in output. + */ + | 'INPUT_ONLY' + | 4 + /** + * Denotes a field as immutable. + * This indicates that the field may be set once in a request to create a + * resource, but may not be changed thereafter. + */ + | 'IMMUTABLE' + | 5 + +/** + * An indicator of the behavior of a given field (for example, that a field + * is required in requests, or given as output but ignored as input). + * This **does not** change the behavior in protocol buffers itself; it only + * denotes the behavior and may affect how API tooling handles the field. + * + * Note: This enum **may** receive new values in the future. + */ +export type OFieldBehavior = typeof FieldBehavior[keyof typeof FieldBehavior] diff --git a/packages/proto-loader/golden-generated/google/api/Http.ts b/packages/proto-loader/golden-generated/google/api/Http.ts index e9b3cb309..6b6ae8a63 100644 --- a/packages/proto-loader/golden-generated/google/api/Http.ts +++ b/packages/proto-loader/golden-generated/google/api/Http.ts @@ -1,19 +1,19 @@ // Original file: deps/googleapis/google/api/http.proto -import type { HttpRule as _google_api_HttpRule, HttpRule__Output as _google_api_HttpRule__Output } from '../../google/api/HttpRule'; +import type { IHttpRule as I_google_api_HttpRule, OHttpRule as O_google_api_HttpRule } from '../../google/api/HttpRule'; /** * Defines the HTTP configuration for an API service. 
It contains a list of * [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method * to one or more HTTP REST API methods. */ -export interface Http { +export interface IHttp { /** * A list of HTTP configuration rules that apply to individual API methods. * * **NOTE:** All service configuration rules follow "last one wins" order. */ - 'rules'?: (_google_api_HttpRule)[]; + 'rules'?: (I_google_api_HttpRule)[]; /** * When set to true, URL path parameters will be fully URI-decoded except in * cases of single segment matches in reserved expansion, where "%2F" will be @@ -30,13 +30,13 @@ export interface Http { * [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method * to one or more HTTP REST API methods. */ -export interface Http__Output { +export interface OHttp { /** * A list of HTTP configuration rules that apply to individual API methods. * * **NOTE:** All service configuration rules follow "last one wins" order. */ - 'rules': (_google_api_HttpRule__Output)[]; + 'rules': (O_google_api_HttpRule)[]; /** * When set to true, URL path parameters will be fully URI-decoded except in * cases of single segment matches in reserved expansion, where "%2F" will be diff --git a/packages/proto-loader/golden-generated/google/api/HttpRule.ts b/packages/proto-loader/golden-generated/google/api/HttpRule.ts index 243a99f80..90efdc00d 100644 --- a/packages/proto-loader/golden-generated/google/api/HttpRule.ts +++ b/packages/proto-loader/golden-generated/google/api/HttpRule.ts @@ -1,7 +1,7 @@ // Original file: deps/googleapis/google/api/http.proto -import type { CustomHttpPattern as _google_api_CustomHttpPattern, CustomHttpPattern__Output as _google_api_CustomHttpPattern__Output } from '../../google/api/CustomHttpPattern'; -import type { HttpRule as _google_api_HttpRule, HttpRule__Output as _google_api_HttpRule__Output } from '../../google/api/HttpRule'; +import type { ICustomHttpPattern as I_google_api_CustomHttpPattern, OCustomHttpPattern as O_google_api_CustomHttpPattern } from '../../google/api/CustomHttpPattern'; +import type { IHttpRule as I_google_api_HttpRule, OHttpRule as O_google_api_HttpRule } from '../../google/api/HttpRule'; /** * # gRPC Transcoding @@ -274,7 +274,7 @@ import type { HttpRule as _google_api_HttpRule, HttpRule__Output as _google_api_ * the request or response body to a repeated field. However, some gRPC * Transcoding implementations may not support this feature. */ -export interface HttpRule { +export interface IHttpRule { /** * Selects a method to which this rule applies. * @@ -317,13 +317,13 @@ export interface HttpRule { * HTTP method unspecified for this rule. The wild-card rule is useful * for services that provide content to Web (HTML) clients. */ - 'custom'?: (_google_api_CustomHttpPattern | null); + 'custom'?: (I_google_api_CustomHttpPattern | null); /** * Additional HTTP bindings for the selector. Nested bindings must * not contain an `additional_bindings` field themselves (that is, * the nesting may only be one level deep). */ - 'additional_bindings'?: (_google_api_HttpRule)[]; + 'additional_bindings'?: (I_google_api_HttpRule)[]; /** * Optional. The name of the response field whose value is mapped to the HTTP * response body. When omitted, the entire response message will be used @@ -612,7 +612,7 @@ export interface HttpRule { * the request or response body to a repeated field. However, some gRPC * Transcoding implementations may not support this feature. 
*/ -export interface HttpRule__Output { +export interface OHttpRule { /** * Selects a method to which this rule applies. * @@ -655,13 +655,13 @@ export interface HttpRule__Output { * HTTP method unspecified for this rule. The wild-card rule is useful * for services that provide content to Web (HTML) clients. */ - 'custom'?: (_google_api_CustomHttpPattern__Output | null); + 'custom'?: (O_google_api_CustomHttpPattern | null); /** * Additional HTTP bindings for the selector. Nested bindings must * not contain an `additional_bindings` field themselves (that is, * the nesting may only be one level deep). */ - 'additional_bindings': (_google_api_HttpRule__Output)[]; + 'additional_bindings': (O_google_api_HttpRule)[]; /** * Optional. The name of the response field whose value is mapped to the HTTP * response body. When omitted, the entire response message will be used diff --git a/packages/proto-loader/golden-generated/google/longrunning/CancelOperationRequest.ts b/packages/proto-loader/golden-generated/google/longrunning/CancelOperationRequest.ts index 05fbc842e..7e0f15ed8 100644 --- a/packages/proto-loader/golden-generated/google/longrunning/CancelOperationRequest.ts +++ b/packages/proto-loader/golden-generated/google/longrunning/CancelOperationRequest.ts @@ -4,7 +4,7 @@ /** * The request message for [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]. */ -export interface CancelOperationRequest { +export interface ICancelOperationRequest { /** * The name of the operation resource to be cancelled. */ @@ -14,7 +14,7 @@ export interface CancelOperationRequest { /** * The request message for [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]. */ -export interface CancelOperationRequest__Output { +export interface OCancelOperationRequest { /** * The name of the operation resource to be cancelled. */ diff --git a/packages/proto-loader/golden-generated/google/longrunning/DeleteOperationRequest.ts b/packages/proto-loader/golden-generated/google/longrunning/DeleteOperationRequest.ts index 0ad87cde9..39d669d0a 100644 --- a/packages/proto-loader/golden-generated/google/longrunning/DeleteOperationRequest.ts +++ b/packages/proto-loader/golden-generated/google/longrunning/DeleteOperationRequest.ts @@ -4,7 +4,7 @@ /** * The request message for [Operations.DeleteOperation][google.longrunning.Operations.DeleteOperation]. */ -export interface DeleteOperationRequest { +export interface IDeleteOperationRequest { /** * The name of the operation resource to be deleted. */ @@ -14,7 +14,7 @@ export interface DeleteOperationRequest { /** * The request message for [Operations.DeleteOperation][google.longrunning.Operations.DeleteOperation]. */ -export interface DeleteOperationRequest__Output { +export interface ODeleteOperationRequest { /** * The name of the operation resource to be deleted. */ diff --git a/packages/proto-loader/golden-generated/google/longrunning/GetOperationRequest.ts b/packages/proto-loader/golden-generated/google/longrunning/GetOperationRequest.ts index 039f01674..9667e2e87 100644 --- a/packages/proto-loader/golden-generated/google/longrunning/GetOperationRequest.ts +++ b/packages/proto-loader/golden-generated/google/longrunning/GetOperationRequest.ts @@ -4,7 +4,7 @@ /** * The request message for [Operations.GetOperation][google.longrunning.Operations.GetOperation]. */ -export interface GetOperationRequest { +export interface IGetOperationRequest { /** * The name of the operation resource. 
*/ @@ -14,7 +14,7 @@ export interface GetOperationRequest { /** * The request message for [Operations.GetOperation][google.longrunning.Operations.GetOperation]. */ -export interface GetOperationRequest__Output { +export interface OGetOperationRequest { /** * The name of the operation resource. */ diff --git a/packages/proto-loader/golden-generated/google/longrunning/ListOperationsRequest.ts b/packages/proto-loader/golden-generated/google/longrunning/ListOperationsRequest.ts index 294ec6773..49dcd39f0 100644 --- a/packages/proto-loader/golden-generated/google/longrunning/ListOperationsRequest.ts +++ b/packages/proto-loader/golden-generated/google/longrunning/ListOperationsRequest.ts @@ -4,7 +4,7 @@ /** * The request message for [Operations.ListOperations][google.longrunning.Operations.ListOperations]. */ -export interface ListOperationsRequest { +export interface IListOperationsRequest { /** * The standard list filter. */ @@ -26,7 +26,7 @@ export interface ListOperationsRequest { /** * The request message for [Operations.ListOperations][google.longrunning.Operations.ListOperations]. */ -export interface ListOperationsRequest__Output { +export interface OListOperationsRequest { /** * The standard list filter. */ diff --git a/packages/proto-loader/golden-generated/google/longrunning/ListOperationsResponse.ts b/packages/proto-loader/golden-generated/google/longrunning/ListOperationsResponse.ts index c295aa801..1e8b9ed5a 100644 --- a/packages/proto-loader/golden-generated/google/longrunning/ListOperationsResponse.ts +++ b/packages/proto-loader/golden-generated/google/longrunning/ListOperationsResponse.ts @@ -1,15 +1,15 @@ // Original file: deps/googleapis/google/longrunning/operations.proto -import type { Operation as _google_longrunning_Operation, Operation__Output as _google_longrunning_Operation__Output } from '../../google/longrunning/Operation'; +import type { IOperation as I_google_longrunning_Operation, OOperation as O_google_longrunning_Operation } from '../../google/longrunning/Operation'; /** * The response message for [Operations.ListOperations][google.longrunning.Operations.ListOperations]. */ -export interface ListOperationsResponse { +export interface IListOperationsResponse { /** * A list of operations that matches the specified filter in the request. */ - 'operations'?: (_google_longrunning_Operation)[]; + 'operations'?: (I_google_longrunning_Operation)[]; /** * The standard List next-page token. */ @@ -19,11 +19,11 @@ export interface ListOperationsResponse { /** * The response message for [Operations.ListOperations][google.longrunning.Operations.ListOperations]. */ -export interface ListOperationsResponse__Output { +export interface OListOperationsResponse { /** * A list of operations that matches the specified filter in the request. */ - 'operations': (_google_longrunning_Operation__Output)[]; + 'operations': (O_google_longrunning_Operation)[]; /** * The standard List next-page token. 
*/ diff --git a/packages/proto-loader/golden-generated/google/longrunning/Operation.ts b/packages/proto-loader/golden-generated/google/longrunning/Operation.ts index 2a4bbe1ee..bbd1d8078 100644 --- a/packages/proto-loader/golden-generated/google/longrunning/Operation.ts +++ b/packages/proto-loader/golden-generated/google/longrunning/Operation.ts @@ -1,13 +1,13 @@ // Original file: deps/googleapis/google/longrunning/operations.proto -import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../google/protobuf/Any'; -import type { Status as _google_rpc_Status, Status__Output as _google_rpc_Status__Output } from '../../google/rpc/Status'; +import type { IAny as I_google_protobuf_Any, OAny as O_google_protobuf_Any } from '../../google/protobuf/Any'; +import type { IStatus as I_google_rpc_Status, OStatus as O_google_rpc_Status } from '../../google/rpc/Status'; /** * This resource represents a long-running operation that is the result of a * network API call. */ -export interface Operation { +export interface IOperation { /** * The server-assigned name, which is only unique within the same service that * originally returns it. If you use the default HTTP mapping, the @@ -20,7 +20,7 @@ export interface Operation { * Some services might not provide such metadata. Any method that returns a * long-running operation should document the metadata type, if any. */ - 'metadata'?: (_google_protobuf_Any | null); + 'metadata'?: (I_google_protobuf_Any | null); /** * If the value is `false`, it means the operation is still in progress. * If `true`, the operation is completed, and either `error` or `response` is @@ -30,7 +30,7 @@ export interface Operation { /** * The error result of the operation in case of failure or cancellation. */ - 'error'?: (_google_rpc_Status | null); + 'error'?: (I_google_rpc_Status | null); /** * The normal response of the operation in case of success. If the original * method returns no data on success, such as `Delete`, the response is @@ -41,7 +41,7 @@ export interface Operation { * is `TakeSnapshot()`, the inferred response type is * `TakeSnapshotResponse`. */ - 'response'?: (_google_protobuf_Any | null); + 'response'?: (I_google_protobuf_Any | null); /** * The operation result, which can be either an `error` or a valid `response`. * If `done` == `false`, neither `error` nor `response` is set. @@ -54,7 +54,7 @@ export interface Operation { * This resource represents a long-running operation that is the result of a * network API call. */ -export interface Operation__Output { +export interface OOperation { /** * The server-assigned name, which is only unique within the same service that * originally returns it. If you use the default HTTP mapping, the @@ -67,7 +67,7 @@ export interface Operation__Output { * Some services might not provide such metadata. Any method that returns a * long-running operation should document the metadata type, if any. */ - 'metadata': (_google_protobuf_Any__Output | null); + 'metadata': (O_google_protobuf_Any | null); /** * If the value is `false`, it means the operation is still in progress. * If `true`, the operation is completed, and either `error` or `response` is @@ -77,7 +77,7 @@ export interface Operation__Output { /** * The error result of the operation in case of failure or cancellation. */ - 'error'?: (_google_rpc_Status__Output | null); + 'error'?: (O_google_rpc_Status | null); /** * The normal response of the operation in case of success. 
If the original * method returns no data on success, such as `Delete`, the response is @@ -88,7 +88,7 @@ export interface Operation__Output { * is `TakeSnapshot()`, the inferred response type is * `TakeSnapshotResponse`. */ - 'response'?: (_google_protobuf_Any__Output | null); + 'response'?: (O_google_protobuf_Any | null); /** * The operation result, which can be either an `error` or a valid `response`. * If `done` == `false`, neither `error` nor `response` is set. diff --git a/packages/proto-loader/golden-generated/google/longrunning/OperationInfo.ts b/packages/proto-loader/golden-generated/google/longrunning/OperationInfo.ts index 343e2f8c9..907574412 100644 --- a/packages/proto-loader/golden-generated/google/longrunning/OperationInfo.ts +++ b/packages/proto-loader/golden-generated/google/longrunning/OperationInfo.ts @@ -14,7 +14,7 @@ * }; * } */ -export interface OperationInfo { +export interface IOperationInfo { /** * Required. The message name of the primary return type for this * long-running operation. @@ -51,7 +51,7 @@ export interface OperationInfo { * }; * } */ -export interface OperationInfo__Output { +export interface OOperationInfo { /** * Required. The message name of the primary return type for this * long-running operation. diff --git a/packages/proto-loader/golden-generated/google/longrunning/Operations.ts b/packages/proto-loader/golden-generated/google/longrunning/Operations.ts index 8e5684ada..00d6a95d2 100644 --- a/packages/proto-loader/golden-generated/google/longrunning/Operations.ts +++ b/packages/proto-loader/golden-generated/google/longrunning/Operations.ts @@ -2,14 +2,14 @@ import type * as grpc from '@grpc/grpc-js' import type { MethodDefinition } from '@grpc/proto-loader' -import type { CancelOperationRequest as _google_longrunning_CancelOperationRequest, CancelOperationRequest__Output as _google_longrunning_CancelOperationRequest__Output } from '../../google/longrunning/CancelOperationRequest'; -import type { DeleteOperationRequest as _google_longrunning_DeleteOperationRequest, DeleteOperationRequest__Output as _google_longrunning_DeleteOperationRequest__Output } from '../../google/longrunning/DeleteOperationRequest'; -import type { Empty as _google_protobuf_Empty, Empty__Output as _google_protobuf_Empty__Output } from '../../google/protobuf/Empty'; -import type { GetOperationRequest as _google_longrunning_GetOperationRequest, GetOperationRequest__Output as _google_longrunning_GetOperationRequest__Output } from '../../google/longrunning/GetOperationRequest'; -import type { ListOperationsRequest as _google_longrunning_ListOperationsRequest, ListOperationsRequest__Output as _google_longrunning_ListOperationsRequest__Output } from '../../google/longrunning/ListOperationsRequest'; -import type { ListOperationsResponse as _google_longrunning_ListOperationsResponse, ListOperationsResponse__Output as _google_longrunning_ListOperationsResponse__Output } from '../../google/longrunning/ListOperationsResponse'; -import type { Operation as _google_longrunning_Operation, Operation__Output as _google_longrunning_Operation__Output } from '../../google/longrunning/Operation'; -import type { WaitOperationRequest as _google_longrunning_WaitOperationRequest, WaitOperationRequest__Output as _google_longrunning_WaitOperationRequest__Output } from '../../google/longrunning/WaitOperationRequest'; +import type { ICancelOperationRequest as I_google_longrunning_CancelOperationRequest, OCancelOperationRequest as O_google_longrunning_CancelOperationRequest } from 
'../../google/longrunning/CancelOperationRequest'; +import type { IDeleteOperationRequest as I_google_longrunning_DeleteOperationRequest, ODeleteOperationRequest as O_google_longrunning_DeleteOperationRequest } from '../../google/longrunning/DeleteOperationRequest'; +import type { IEmpty as I_google_protobuf_Empty, OEmpty as O_google_protobuf_Empty } from '../../google/protobuf/Empty'; +import type { IGetOperationRequest as I_google_longrunning_GetOperationRequest, OGetOperationRequest as O_google_longrunning_GetOperationRequest } from '../../google/longrunning/GetOperationRequest'; +import type { IListOperationsRequest as I_google_longrunning_ListOperationsRequest, OListOperationsRequest as O_google_longrunning_ListOperationsRequest } from '../../google/longrunning/ListOperationsRequest'; +import type { IListOperationsResponse as I_google_longrunning_ListOperationsResponse, OListOperationsResponse as O_google_longrunning_ListOperationsResponse } from '../../google/longrunning/ListOperationsResponse'; +import type { IOperation as I_google_longrunning_Operation, OOperation as O_google_longrunning_Operation } from '../../google/longrunning/Operation'; +import type { IWaitOperationRequest as I_google_longrunning_WaitOperationRequest, OWaitOperationRequest as O_google_longrunning_WaitOperationRequest } from '../../google/longrunning/WaitOperationRequest'; /** * Manages long-running operations with an API service. @@ -35,10 +35,10 @@ export interface OperationsClient extends grpc.Client { * an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, * corresponding to `Code.CANCELLED`. */ - CancelOperation(argument: _google_longrunning_CancelOperationRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_protobuf_Empty__Output) => void): grpc.ClientUnaryCall; - CancelOperation(argument: _google_longrunning_CancelOperationRequest, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _google_protobuf_Empty__Output) => void): grpc.ClientUnaryCall; - CancelOperation(argument: _google_longrunning_CancelOperationRequest, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_protobuf_Empty__Output) => void): grpc.ClientUnaryCall; - CancelOperation(argument: _google_longrunning_CancelOperationRequest, callback: (error?: grpc.ServiceError, result?: _google_protobuf_Empty__Output) => void): grpc.ClientUnaryCall; + CancelOperation(argument: I_google_longrunning_CancelOperationRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback): grpc.ClientUnaryCall; + CancelOperation(argument: I_google_longrunning_CancelOperationRequest, metadata: grpc.Metadata, callback: grpc.requestCallback): grpc.ClientUnaryCall; + CancelOperation(argument: I_google_longrunning_CancelOperationRequest, options: grpc.CallOptions, callback: grpc.requestCallback): grpc.ClientUnaryCall; + CancelOperation(argument: I_google_longrunning_CancelOperationRequest, callback: grpc.requestCallback): grpc.ClientUnaryCall; /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not @@ -51,10 +51,10 @@ export interface OperationsClient extends grpc.Client { * an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, * corresponding to `Code.CANCELLED`. 
*/ - cancelOperation(argument: _google_longrunning_CancelOperationRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_protobuf_Empty__Output) => void): grpc.ClientUnaryCall; - cancelOperation(argument: _google_longrunning_CancelOperationRequest, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _google_protobuf_Empty__Output) => void): grpc.ClientUnaryCall; - cancelOperation(argument: _google_longrunning_CancelOperationRequest, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_protobuf_Empty__Output) => void): grpc.ClientUnaryCall; - cancelOperation(argument: _google_longrunning_CancelOperationRequest, callback: (error?: grpc.ServiceError, result?: _google_protobuf_Empty__Output) => void): grpc.ClientUnaryCall; + cancelOperation(argument: I_google_longrunning_CancelOperationRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback): grpc.ClientUnaryCall; + cancelOperation(argument: I_google_longrunning_CancelOperationRequest, metadata: grpc.Metadata, callback: grpc.requestCallback): grpc.ClientUnaryCall; + cancelOperation(argument: I_google_longrunning_CancelOperationRequest, options: grpc.CallOptions, callback: grpc.requestCallback): grpc.ClientUnaryCall; + cancelOperation(argument: I_google_longrunning_CancelOperationRequest, callback: grpc.requestCallback): grpc.ClientUnaryCall; /** * Deletes a long-running operation. This method indicates that the client is @@ -62,39 +62,39 @@ export interface OperationsClient extends grpc.Client { * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. */ - DeleteOperation(argument: _google_longrunning_DeleteOperationRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_protobuf_Empty__Output) => void): grpc.ClientUnaryCall; - DeleteOperation(argument: _google_longrunning_DeleteOperationRequest, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _google_protobuf_Empty__Output) => void): grpc.ClientUnaryCall; - DeleteOperation(argument: _google_longrunning_DeleteOperationRequest, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_protobuf_Empty__Output) => void): grpc.ClientUnaryCall; - DeleteOperation(argument: _google_longrunning_DeleteOperationRequest, callback: (error?: grpc.ServiceError, result?: _google_protobuf_Empty__Output) => void): grpc.ClientUnaryCall; + DeleteOperation(argument: I_google_longrunning_DeleteOperationRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback): grpc.ClientUnaryCall; + DeleteOperation(argument: I_google_longrunning_DeleteOperationRequest, metadata: grpc.Metadata, callback: grpc.requestCallback): grpc.ClientUnaryCall; + DeleteOperation(argument: I_google_longrunning_DeleteOperationRequest, options: grpc.CallOptions, callback: grpc.requestCallback): grpc.ClientUnaryCall; + DeleteOperation(argument: I_google_longrunning_DeleteOperationRequest, callback: grpc.requestCallback): grpc.ClientUnaryCall; /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. 
*/ - deleteOperation(argument: _google_longrunning_DeleteOperationRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_protobuf_Empty__Output) => void): grpc.ClientUnaryCall; - deleteOperation(argument: _google_longrunning_DeleteOperationRequest, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _google_protobuf_Empty__Output) => void): grpc.ClientUnaryCall; - deleteOperation(argument: _google_longrunning_DeleteOperationRequest, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_protobuf_Empty__Output) => void): grpc.ClientUnaryCall; - deleteOperation(argument: _google_longrunning_DeleteOperationRequest, callback: (error?: grpc.ServiceError, result?: _google_protobuf_Empty__Output) => void): grpc.ClientUnaryCall; + deleteOperation(argument: I_google_longrunning_DeleteOperationRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback): grpc.ClientUnaryCall; + deleteOperation(argument: I_google_longrunning_DeleteOperationRequest, metadata: grpc.Metadata, callback: grpc.requestCallback): grpc.ClientUnaryCall; + deleteOperation(argument: I_google_longrunning_DeleteOperationRequest, options: grpc.CallOptions, callback: grpc.requestCallback): grpc.ClientUnaryCall; + deleteOperation(argument: I_google_longrunning_DeleteOperationRequest, callback: grpc.requestCallback): grpc.ClientUnaryCall; /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. */ - GetOperation(argument: _google_longrunning_GetOperationRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_longrunning_Operation__Output) => void): grpc.ClientUnaryCall; - GetOperation(argument: _google_longrunning_GetOperationRequest, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _google_longrunning_Operation__Output) => void): grpc.ClientUnaryCall; - GetOperation(argument: _google_longrunning_GetOperationRequest, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_longrunning_Operation__Output) => void): grpc.ClientUnaryCall; - GetOperation(argument: _google_longrunning_GetOperationRequest, callback: (error?: grpc.ServiceError, result?: _google_longrunning_Operation__Output) => void): grpc.ClientUnaryCall; + GetOperation(argument: I_google_longrunning_GetOperationRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback): grpc.ClientUnaryCall; + GetOperation(argument: I_google_longrunning_GetOperationRequest, metadata: grpc.Metadata, callback: grpc.requestCallback): grpc.ClientUnaryCall; + GetOperation(argument: I_google_longrunning_GetOperationRequest, options: grpc.CallOptions, callback: grpc.requestCallback): grpc.ClientUnaryCall; + GetOperation(argument: I_google_longrunning_GetOperationRequest, callback: grpc.requestCallback): grpc.ClientUnaryCall; /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. 
*/ - getOperation(argument: _google_longrunning_GetOperationRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_longrunning_Operation__Output) => void): grpc.ClientUnaryCall; - getOperation(argument: _google_longrunning_GetOperationRequest, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _google_longrunning_Operation__Output) => void): grpc.ClientUnaryCall; - getOperation(argument: _google_longrunning_GetOperationRequest, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_longrunning_Operation__Output) => void): grpc.ClientUnaryCall; - getOperation(argument: _google_longrunning_GetOperationRequest, callback: (error?: grpc.ServiceError, result?: _google_longrunning_Operation__Output) => void): grpc.ClientUnaryCall; + getOperation(argument: I_google_longrunning_GetOperationRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback): grpc.ClientUnaryCall; + getOperation(argument: I_google_longrunning_GetOperationRequest, metadata: grpc.Metadata, callback: grpc.requestCallback): grpc.ClientUnaryCall; + getOperation(argument: I_google_longrunning_GetOperationRequest, options: grpc.CallOptions, callback: grpc.requestCallback): grpc.ClientUnaryCall; + getOperation(argument: I_google_longrunning_GetOperationRequest, callback: grpc.requestCallback): grpc.ClientUnaryCall; /** * Lists operations that match the specified filter in the request. If the @@ -108,10 +108,10 @@ export interface OperationsClient extends grpc.Client { * collection id, however overriding users must ensure the name binding * is the parent resource, without the operations collection id. */ - ListOperations(argument: _google_longrunning_ListOperationsRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_longrunning_ListOperationsResponse__Output) => void): grpc.ClientUnaryCall; - ListOperations(argument: _google_longrunning_ListOperationsRequest, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _google_longrunning_ListOperationsResponse__Output) => void): grpc.ClientUnaryCall; - ListOperations(argument: _google_longrunning_ListOperationsRequest, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_longrunning_ListOperationsResponse__Output) => void): grpc.ClientUnaryCall; - ListOperations(argument: _google_longrunning_ListOperationsRequest, callback: (error?: grpc.ServiceError, result?: _google_longrunning_ListOperationsResponse__Output) => void): grpc.ClientUnaryCall; + ListOperations(argument: I_google_longrunning_ListOperationsRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback): grpc.ClientUnaryCall; + ListOperations(argument: I_google_longrunning_ListOperationsRequest, metadata: grpc.Metadata, callback: grpc.requestCallback): grpc.ClientUnaryCall; + ListOperations(argument: I_google_longrunning_ListOperationsRequest, options: grpc.CallOptions, callback: grpc.requestCallback): grpc.ClientUnaryCall; + ListOperations(argument: I_google_longrunning_ListOperationsRequest, callback: grpc.requestCallback): grpc.ClientUnaryCall; /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. 
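As a usage sketch for the client interface being retyped here: the generated OperationsClient can be constructed from a package definition loaded with @grpc/proto-loader, and GetOperation then takes an I-prefixed request while its callback receives the O-prefixed Operation. The proto path, includeDirs entry, server address, and operation name below are placeholders, and loading the package as `any` before indexing into it is only a shortcut for the sketch:

import * as grpc from '@grpc/grpc-js';
import * as protoLoader from '@grpc/proto-loader';
import type { OperationsClient } from './google/longrunning/Operations';

// Placeholder proto location; assumes the googleapis protos are available locally.
const packageDefinition = protoLoader.loadSync('google/longrunning/operations.proto', {
  includeDirs: ['deps/googleapis'],
});
const loaded = grpc.loadPackageDefinition(packageDefinition) as any;

// The generated OperationsClient interface types instances of this constructor.
const client: OperationsClient = new loaded.google.longrunning.Operations(
  'localhost:50051',
  grpc.credentials.createInsecure()
);

// GetOperation accepts an I-prefixed request; the callback receives the O-prefixed Operation.
client.GetOperation({ name: 'operations/example' }, (err, operation) => {
  if (err) {
    console.error(`GetOperation failed: ${err.message}`);
  } else {
    console.log(`done: ${operation?.done}`);
  }
});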
@@ -124,10 +124,10 @@ export interface OperationsClient extends grpc.Client { * collection id, however overriding users must ensure the name binding * is the parent resource, without the operations collection id. */ - listOperations(argument: _google_longrunning_ListOperationsRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_longrunning_ListOperationsResponse__Output) => void): grpc.ClientUnaryCall; - listOperations(argument: _google_longrunning_ListOperationsRequest, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _google_longrunning_ListOperationsResponse__Output) => void): grpc.ClientUnaryCall; - listOperations(argument: _google_longrunning_ListOperationsRequest, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_longrunning_ListOperationsResponse__Output) => void): grpc.ClientUnaryCall; - listOperations(argument: _google_longrunning_ListOperationsRequest, callback: (error?: grpc.ServiceError, result?: _google_longrunning_ListOperationsResponse__Output) => void): grpc.ClientUnaryCall; + listOperations(argument: I_google_longrunning_ListOperationsRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback): grpc.ClientUnaryCall; + listOperations(argument: I_google_longrunning_ListOperationsRequest, metadata: grpc.Metadata, callback: grpc.requestCallback): grpc.ClientUnaryCall; + listOperations(argument: I_google_longrunning_ListOperationsRequest, options: grpc.CallOptions, callback: grpc.requestCallback): grpc.ClientUnaryCall; + listOperations(argument: I_google_longrunning_ListOperationsRequest, callback: grpc.requestCallback): grpc.ClientUnaryCall; /** * Waits for the specified long-running operation until it is done or reaches @@ -140,10 +140,10 @@ export interface OperationsClient extends grpc.Client { * state before the specified timeout (including immediately), meaning even an * immediate response is no guarantee that the operation is done. 
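The WaitOperation overloads that follow take an I-prefixed request whose timeout field is a Duration; as the WaitOperationRequest and Duration hunks later in this diff show, input-side Duration values may supply seconds as a number, string, or Long. A small sketch; the `name` field and the literal values are illustrative:

import type { IWaitOperationRequest } from './google/longrunning/WaitOperationRequest';

// Input-side message: optional fields, permissive Duration representation.
export const waitRequest: IWaitOperationRequest = {
  name: 'operations/example',
  timeout: { seconds: 30, nanos: 0 },
};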
*/ - WaitOperation(argument: _google_longrunning_WaitOperationRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_longrunning_Operation__Output) => void): grpc.ClientUnaryCall; - WaitOperation(argument: _google_longrunning_WaitOperationRequest, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _google_longrunning_Operation__Output) => void): grpc.ClientUnaryCall; - WaitOperation(argument: _google_longrunning_WaitOperationRequest, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_longrunning_Operation__Output) => void): grpc.ClientUnaryCall; - WaitOperation(argument: _google_longrunning_WaitOperationRequest, callback: (error?: grpc.ServiceError, result?: _google_longrunning_Operation__Output) => void): grpc.ClientUnaryCall; + WaitOperation(argument: I_google_longrunning_WaitOperationRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback): grpc.ClientUnaryCall; + WaitOperation(argument: I_google_longrunning_WaitOperationRequest, metadata: grpc.Metadata, callback: grpc.requestCallback): grpc.ClientUnaryCall; + WaitOperation(argument: I_google_longrunning_WaitOperationRequest, options: grpc.CallOptions, callback: grpc.requestCallback): grpc.ClientUnaryCall; + WaitOperation(argument: I_google_longrunning_WaitOperationRequest, callback: grpc.requestCallback): grpc.ClientUnaryCall; /** * Waits for the specified long-running operation until it is done or reaches * at most a specified timeout, returning the latest state. If the operation @@ -155,10 +155,10 @@ export interface OperationsClient extends grpc.Client { * state before the specified timeout (including immediately), meaning even an * immediate response is no guarantee that the operation is done. */ - waitOperation(argument: _google_longrunning_WaitOperationRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_longrunning_Operation__Output) => void): grpc.ClientUnaryCall; - waitOperation(argument: _google_longrunning_WaitOperationRequest, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _google_longrunning_Operation__Output) => void): grpc.ClientUnaryCall; - waitOperation(argument: _google_longrunning_WaitOperationRequest, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_longrunning_Operation__Output) => void): grpc.ClientUnaryCall; - waitOperation(argument: _google_longrunning_WaitOperationRequest, callback: (error?: grpc.ServiceError, result?: _google_longrunning_Operation__Output) => void): grpc.ClientUnaryCall; + waitOperation(argument: I_google_longrunning_WaitOperationRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback): grpc.ClientUnaryCall; + waitOperation(argument: I_google_longrunning_WaitOperationRequest, metadata: grpc.Metadata, callback: grpc.requestCallback): grpc.ClientUnaryCall; + waitOperation(argument: I_google_longrunning_WaitOperationRequest, options: grpc.CallOptions, callback: grpc.requestCallback): grpc.ClientUnaryCall; + waitOperation(argument: I_google_longrunning_WaitOperationRequest, callback: grpc.requestCallback): grpc.ClientUnaryCall; } @@ -186,7 +186,7 @@ export interface OperationsHandlers extends grpc.UntypedServiceImplementation { * an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, * corresponding to `Code.CANCELLED`. 
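The handler typings in the hunks just below pair O-prefixed (deserialized) request types with I-prefixed response types. A sketch of one unary handler written against that convention, with an illustrative response body:

import type * as grpc from '@grpc/grpc-js';
import type { OGetOperationRequest } from './google/longrunning/GetOperationRequest';
import type { IOperation } from './google/longrunning/Operation';

// Server side: the request arrives as the O-prefixed output type, and the
// response is built with the I-prefixed input type before serialization.
export const getOperation: grpc.handleUnaryCall<OGetOperationRequest, IOperation> = (call, callback) => {
  const response: IOperation = { name: call.request.name, done: true };
  callback(null, response);
};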
*/ - CancelOperation: grpc.handleUnaryCall<_google_longrunning_CancelOperationRequest__Output, _google_protobuf_Empty>; + CancelOperation: grpc.handleUnaryCall<O_google_longrunning_CancelOperationRequest, I_google_protobuf_Empty>; /** * Deletes a long-running operation. This method indicates that the client is @@ -194,14 +194,14 @@ export interface OperationsHandlers extends grpc.UntypedServiceImplementation { * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. */ - DeleteOperation: grpc.handleUnaryCall<_google_longrunning_DeleteOperationRequest__Output, _google_protobuf_Empty>; + DeleteOperation: grpc.handleUnaryCall<O_google_longrunning_DeleteOperationRequest, I_google_protobuf_Empty>; /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. */ - GetOperation: grpc.handleUnaryCall<_google_longrunning_GetOperationRequest__Output, _google_longrunning_Operation>; + GetOperation: grpc.handleUnaryCall<O_google_longrunning_GetOperationRequest, I_google_longrunning_Operation>; /** * Lists operations that match the specified filter in the request. If the @@ -215,7 +215,7 @@ export interface OperationsHandlers extends grpc.UntypedServiceImplementation { * collection id, however overriding users must ensure the name binding * is the parent resource, without the operations collection id. */ - ListOperations: grpc.handleUnaryCall<_google_longrunning_ListOperationsRequest__Output, _google_longrunning_ListOperationsResponse>; + ListOperations: grpc.handleUnaryCall<O_google_longrunning_ListOperationsRequest, I_google_longrunning_ListOperationsResponse>; /** * Waits for the specified long-running operation until it is done or reaches @@ -228,14 +228,14 @@ export interface OperationsHandlers extends grpc.UntypedServiceImplementation { * state before the specified timeout (including immediately), meaning even an * immediate response is no guarantee that the operation is done. */ - WaitOperation: grpc.handleUnaryCall<_google_longrunning_WaitOperationRequest__Output, _google_longrunning_Operation>; + WaitOperation: grpc.handleUnaryCall<O_google_longrunning_WaitOperationRequest, I_google_longrunning_Operation>; } -export interface OperationsDefinition { - CancelOperation: MethodDefinition<_google_longrunning_CancelOperationRequest, _google_protobuf_Empty, _google_longrunning_CancelOperationRequest__Output, _google_protobuf_Empty__Output> - DeleteOperation: MethodDefinition<_google_longrunning_DeleteOperationRequest, _google_protobuf_Empty, _google_longrunning_DeleteOperationRequest__Output, _google_protobuf_Empty__Output> - GetOperation: MethodDefinition<_google_longrunning_GetOperationRequest, _google_longrunning_Operation, _google_longrunning_GetOperationRequest__Output, _google_longrunning_Operation__Output> - ListOperations: MethodDefinition<_google_longrunning_ListOperationsRequest, _google_longrunning_ListOperationsResponse, _google_longrunning_ListOperationsRequest__Output, _google_longrunning_ListOperationsResponse__Output> - WaitOperation: MethodDefinition<_google_longrunning_WaitOperationRequest, _google_longrunning_Operation, _google_longrunning_WaitOperationRequest__Output, _google_longrunning_Operation__Output> +export interface OperationsDefinition extends grpc.ServiceDefinition { + CancelOperation: MethodDefinition<I_google_longrunning_CancelOperationRequest, I_google_protobuf_Empty, O_google_longrunning_CancelOperationRequest, O_google_protobuf_Empty> + DeleteOperation: MethodDefinition<I_google_longrunning_DeleteOperationRequest, I_google_protobuf_Empty, O_google_longrunning_DeleteOperationRequest, O_google_protobuf_Empty> + GetOperation: MethodDefinition<I_google_longrunning_GetOperationRequest, I_google_longrunning_Operation, O_google_longrunning_GetOperationRequest, O_google_longrunning_Operation> + ListOperations: MethodDefinition<I_google_longrunning_ListOperationsRequest, I_google_longrunning_ListOperationsResponse, O_google_longrunning_ListOperationsRequest, O_google_longrunning_ListOperationsResponse> + WaitOperation: MethodDefinition<I_google_longrunning_WaitOperationRequest, I_google_longrunning_Operation, O_google_longrunning_WaitOperationRequest, O_google_longrunning_Operation> } diff --git a/packages/proto-loader/golden-generated/google/longrunning/WaitOperationRequest.ts b/packages/proto-loader/golden-generated/google/longrunning/WaitOperationRequest.ts index f97e39dc4..2f11f7580 100644 --- a/packages/proto-loader/golden-generated/google/longrunning/WaitOperationRequest.ts +++ 
b/packages/proto-loader/golden-generated/google/longrunning/WaitOperationRequest.ts @@ -1,11 +1,11 @@ // Original file: deps/googleapis/google/longrunning/operations.proto -import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../google/protobuf/Duration'; +import type { IDuration as I_google_protobuf_Duration, ODuration as O_google_protobuf_Duration } from '../../google/protobuf/Duration'; /** * The request message for [Operations.WaitOperation][google.longrunning.Operations.WaitOperation]. */ -export interface WaitOperationRequest { +export interface IWaitOperationRequest { /** * The name of the operation resource to wait on. */ @@ -15,13 +15,13 @@ export interface WaitOperationRequest { * will be at most the time permitted by the underlying HTTP/RPC protocol. * If RPC context deadline is also specified, the shorter one will be used. */ - 'timeout'?: (_google_protobuf_Duration | null); + 'timeout'?: (I_google_protobuf_Duration | null); } /** * The request message for [Operations.WaitOperation][google.longrunning.Operations.WaitOperation]. */ -export interface WaitOperationRequest__Output { +export interface OWaitOperationRequest { /** * The name of the operation resource to wait on. */ @@ -31,5 +31,5 @@ export interface WaitOperationRequest__Output { * will be at most the time permitted by the underlying HTTP/RPC protocol. * If RPC context deadline is also specified, the shorter one will be used. */ - 'timeout': (_google_protobuf_Duration__Output | null); + 'timeout': (O_google_protobuf_Duration | null); } diff --git a/packages/proto-loader/golden-generated/google/protobuf/Any.ts b/packages/proto-loader/golden-generated/google/protobuf/Any.ts index fe0d05f12..d9ee4e200 100644 --- a/packages/proto-loader/golden-generated/google/protobuf/Any.ts +++ b/packages/proto-loader/golden-generated/google/protobuf/Any.ts @@ -2,12 +2,12 @@ import type { AnyExtension } from '@grpc/proto-loader'; -export type Any = AnyExtension | { +export type IAny = AnyExtension | { type_url: string; value: Buffer | Uint8Array | string; } -export type Any__Output = AnyExtension | { +export type OAny = AnyExtension | { type_url: string; value: Buffer; } diff --git a/packages/proto-loader/golden-generated/google/protobuf/DescriptorProto.ts b/packages/proto-loader/golden-generated/google/protobuf/DescriptorProto.ts index f729437f4..5f568ca2c 100644 --- a/packages/proto-loader/golden-generated/google/protobuf/DescriptorProto.ts +++ b/packages/proto-loader/golden-generated/google/protobuf/DescriptorProto.ts @@ -1,53 +1,53 @@ // Original file: null -import type { FieldDescriptorProto as _google_protobuf_FieldDescriptorProto, FieldDescriptorProto__Output as _google_protobuf_FieldDescriptorProto__Output } from '../../google/protobuf/FieldDescriptorProto'; -import type { DescriptorProto as _google_protobuf_DescriptorProto, DescriptorProto__Output as _google_protobuf_DescriptorProto__Output } from '../../google/protobuf/DescriptorProto'; -import type { EnumDescriptorProto as _google_protobuf_EnumDescriptorProto, EnumDescriptorProto__Output as _google_protobuf_EnumDescriptorProto__Output } from '../../google/protobuf/EnumDescriptorProto'; -import type { MessageOptions as _google_protobuf_MessageOptions, MessageOptions__Output as _google_protobuf_MessageOptions__Output } from '../../google/protobuf/MessageOptions'; -import type { OneofDescriptorProto as _google_protobuf_OneofDescriptorProto, OneofDescriptorProto__Output as 
_google_protobuf_OneofDescriptorProto__Output } from '../../google/protobuf/OneofDescriptorProto'; +import type { IFieldDescriptorProto as I_google_protobuf_FieldDescriptorProto, OFieldDescriptorProto as O_google_protobuf_FieldDescriptorProto } from '../../google/protobuf/FieldDescriptorProto'; +import type { IDescriptorProto as I_google_protobuf_DescriptorProto, ODescriptorProto as O_google_protobuf_DescriptorProto } from '../../google/protobuf/DescriptorProto'; +import type { IEnumDescriptorProto as I_google_protobuf_EnumDescriptorProto, OEnumDescriptorProto as O_google_protobuf_EnumDescriptorProto } from '../../google/protobuf/EnumDescriptorProto'; +import type { IMessageOptions as I_google_protobuf_MessageOptions, OMessageOptions as O_google_protobuf_MessageOptions } from '../../google/protobuf/MessageOptions'; +import type { IOneofDescriptorProto as I_google_protobuf_OneofDescriptorProto, OOneofDescriptorProto as O_google_protobuf_OneofDescriptorProto } from '../../google/protobuf/OneofDescriptorProto'; -export interface _google_protobuf_DescriptorProto_ExtensionRange { +export interface I_google_protobuf_DescriptorProto_ExtensionRange { 'start'?: (number); 'end'?: (number); } -export interface _google_protobuf_DescriptorProto_ExtensionRange__Output { +export interface O_google_protobuf_DescriptorProto_ExtensionRange { 'start': (number); 'end': (number); } -export interface _google_protobuf_DescriptorProto_ReservedRange { +export interface I_google_protobuf_DescriptorProto_ReservedRange { 'start'?: (number); 'end'?: (number); } -export interface _google_protobuf_DescriptorProto_ReservedRange__Output { +export interface O_google_protobuf_DescriptorProto_ReservedRange { 'start': (number); 'end': (number); } -export interface DescriptorProto { +export interface IDescriptorProto { 'name'?: (string); - 'field'?: (_google_protobuf_FieldDescriptorProto)[]; - 'nestedType'?: (_google_protobuf_DescriptorProto)[]; - 'enumType'?: (_google_protobuf_EnumDescriptorProto)[]; - 'extensionRange'?: (_google_protobuf_DescriptorProto_ExtensionRange)[]; - 'extension'?: (_google_protobuf_FieldDescriptorProto)[]; - 'options'?: (_google_protobuf_MessageOptions | null); - 'oneofDecl'?: (_google_protobuf_OneofDescriptorProto)[]; - 'reservedRange'?: (_google_protobuf_DescriptorProto_ReservedRange)[]; + 'field'?: (I_google_protobuf_FieldDescriptorProto)[]; + 'nestedType'?: (I_google_protobuf_DescriptorProto)[]; + 'enumType'?: (I_google_protobuf_EnumDescriptorProto)[]; + 'extensionRange'?: (I_google_protobuf_DescriptorProto_ExtensionRange)[]; + 'extension'?: (I_google_protobuf_FieldDescriptorProto)[]; + 'options'?: (I_google_protobuf_MessageOptions | null); + 'oneofDecl'?: (I_google_protobuf_OneofDescriptorProto)[]; + 'reservedRange'?: (I_google_protobuf_DescriptorProto_ReservedRange)[]; 'reservedName'?: (string)[]; } -export interface DescriptorProto__Output { +export interface ODescriptorProto { 'name': (string); - 'field': (_google_protobuf_FieldDescriptorProto__Output)[]; - 'nestedType': (_google_protobuf_DescriptorProto__Output)[]; - 'enumType': (_google_protobuf_EnumDescriptorProto__Output)[]; - 'extensionRange': (_google_protobuf_DescriptorProto_ExtensionRange__Output)[]; - 'extension': (_google_protobuf_FieldDescriptorProto__Output)[]; - 'options': (_google_protobuf_MessageOptions__Output | null); - 'oneofDecl': (_google_protobuf_OneofDescriptorProto__Output)[]; - 'reservedRange': (_google_protobuf_DescriptorProto_ReservedRange__Output)[]; + 'field': (O_google_protobuf_FieldDescriptorProto)[]; + 
'nestedType': (O_google_protobuf_DescriptorProto)[]; + 'enumType': (O_google_protobuf_EnumDescriptorProto)[]; + 'extensionRange': (O_google_protobuf_DescriptorProto_ExtensionRange)[]; + 'extension': (O_google_protobuf_FieldDescriptorProto)[]; + 'options': (O_google_protobuf_MessageOptions | null); + 'oneofDecl': (O_google_protobuf_OneofDescriptorProto)[]; + 'reservedRange': (O_google_protobuf_DescriptorProto_ReservedRange)[]; 'reservedName': (string)[]; } diff --git a/packages/proto-loader/golden-generated/google/protobuf/Duration.ts b/packages/proto-loader/golden-generated/google/protobuf/Duration.ts index 8595377a0..d5e3be89a 100644 --- a/packages/proto-loader/golden-generated/google/protobuf/Duration.ts +++ b/packages/proto-loader/golden-generated/google/protobuf/Duration.ts @@ -2,12 +2,12 @@ import type { Long } from '@grpc/proto-loader'; -export interface Duration { +export interface IDuration { 'seconds'?: (number | string | Long); 'nanos'?: (number); } -export interface Duration__Output { +export interface ODuration { 'seconds': (string); 'nanos': (number); } diff --git a/packages/proto-loader/golden-generated/google/protobuf/Empty.ts b/packages/proto-loader/golden-generated/google/protobuf/Empty.ts index f32c2a284..6594cc86c 100644 --- a/packages/proto-loader/golden-generated/google/protobuf/Empty.ts +++ b/packages/proto-loader/golden-generated/google/protobuf/Empty.ts @@ -1,8 +1,8 @@ // Original file: null -export interface Empty { +export interface IEmpty { } -export interface Empty__Output { +export interface OEmpty { } diff --git a/packages/proto-loader/golden-generated/google/protobuf/EnumDescriptorProto.ts b/packages/proto-loader/golden-generated/google/protobuf/EnumDescriptorProto.ts index dc4c9673e..30f52c610 100644 --- a/packages/proto-loader/golden-generated/google/protobuf/EnumDescriptorProto.ts +++ b/packages/proto-loader/golden-generated/google/protobuf/EnumDescriptorProto.ts @@ -1,16 +1,16 @@ // Original file: null -import type { EnumValueDescriptorProto as _google_protobuf_EnumValueDescriptorProto, EnumValueDescriptorProto__Output as _google_protobuf_EnumValueDescriptorProto__Output } from '../../google/protobuf/EnumValueDescriptorProto'; -import type { EnumOptions as _google_protobuf_EnumOptions, EnumOptions__Output as _google_protobuf_EnumOptions__Output } from '../../google/protobuf/EnumOptions'; +import type { IEnumValueDescriptorProto as I_google_protobuf_EnumValueDescriptorProto, OEnumValueDescriptorProto as O_google_protobuf_EnumValueDescriptorProto } from '../../google/protobuf/EnumValueDescriptorProto'; +import type { IEnumOptions as I_google_protobuf_EnumOptions, OEnumOptions as O_google_protobuf_EnumOptions } from '../../google/protobuf/EnumOptions'; -export interface EnumDescriptorProto { +export interface IEnumDescriptorProto { 'name'?: (string); - 'value'?: (_google_protobuf_EnumValueDescriptorProto)[]; - 'options'?: (_google_protobuf_EnumOptions | null); + 'value'?: (I_google_protobuf_EnumValueDescriptorProto)[]; + 'options'?: (I_google_protobuf_EnumOptions | null); } -export interface EnumDescriptorProto__Output { +export interface OEnumDescriptorProto { 'name': (string); - 'value': (_google_protobuf_EnumValueDescriptorProto__Output)[]; - 'options': (_google_protobuf_EnumOptions__Output | null); + 'value': (O_google_protobuf_EnumValueDescriptorProto)[]; + 'options': (O_google_protobuf_EnumOptions | null); } diff --git a/packages/proto-loader/golden-generated/google/protobuf/EnumOptions.ts 
b/packages/proto-loader/golden-generated/google/protobuf/EnumOptions.ts index b92ade4f9..6d2a0c2c6 100644 --- a/packages/proto-loader/golden-generated/google/protobuf/EnumOptions.ts +++ b/packages/proto-loader/golden-generated/google/protobuf/EnumOptions.ts @@ -1,15 +1,15 @@ // Original file: null -import type { UninterpretedOption as _google_protobuf_UninterpretedOption, UninterpretedOption__Output as _google_protobuf_UninterpretedOption__Output } from '../../google/protobuf/UninterpretedOption'; +import type { IUninterpretedOption as I_google_protobuf_UninterpretedOption, OUninterpretedOption as O_google_protobuf_UninterpretedOption } from '../../google/protobuf/UninterpretedOption'; -export interface EnumOptions { +export interface IEnumOptions { 'allowAlias'?: (boolean); 'deprecated'?: (boolean); - 'uninterpretedOption'?: (_google_protobuf_UninterpretedOption)[]; + 'uninterpretedOption'?: (I_google_protobuf_UninterpretedOption)[]; } -export interface EnumOptions__Output { +export interface OEnumOptions { 'allowAlias': (boolean); 'deprecated': (boolean); - 'uninterpretedOption': (_google_protobuf_UninterpretedOption__Output)[]; + 'uninterpretedOption': (O_google_protobuf_UninterpretedOption)[]; } diff --git a/packages/proto-loader/golden-generated/google/protobuf/EnumValueDescriptorProto.ts b/packages/proto-loader/golden-generated/google/protobuf/EnumValueDescriptorProto.ts index 7f8e57ea5..44cfcde4a 100644 --- a/packages/proto-loader/golden-generated/google/protobuf/EnumValueDescriptorProto.ts +++ b/packages/proto-loader/golden-generated/google/protobuf/EnumValueDescriptorProto.ts @@ -1,15 +1,15 @@ // Original file: null -import type { EnumValueOptions as _google_protobuf_EnumValueOptions, EnumValueOptions__Output as _google_protobuf_EnumValueOptions__Output } from '../../google/protobuf/EnumValueOptions'; +import type { IEnumValueOptions as I_google_protobuf_EnumValueOptions, OEnumValueOptions as O_google_protobuf_EnumValueOptions } from '../../google/protobuf/EnumValueOptions'; -export interface EnumValueDescriptorProto { +export interface IEnumValueDescriptorProto { 'name'?: (string); 'number'?: (number); - 'options'?: (_google_protobuf_EnumValueOptions | null); + 'options'?: (I_google_protobuf_EnumValueOptions | null); } -export interface EnumValueDescriptorProto__Output { +export interface OEnumValueDescriptorProto { 'name': (string); 'number': (number); - 'options': (_google_protobuf_EnumValueOptions__Output | null); + 'options': (O_google_protobuf_EnumValueOptions | null); } diff --git a/packages/proto-loader/golden-generated/google/protobuf/EnumValueOptions.ts b/packages/proto-loader/golden-generated/google/protobuf/EnumValueOptions.ts index e60ee6f4c..143381113 100644 --- a/packages/proto-loader/golden-generated/google/protobuf/EnumValueOptions.ts +++ b/packages/proto-loader/golden-generated/google/protobuf/EnumValueOptions.ts @@ -1,13 +1,13 @@ // Original file: null -import type { UninterpretedOption as _google_protobuf_UninterpretedOption, UninterpretedOption__Output as _google_protobuf_UninterpretedOption__Output } from '../../google/protobuf/UninterpretedOption'; +import type { IUninterpretedOption as I_google_protobuf_UninterpretedOption, OUninterpretedOption as O_google_protobuf_UninterpretedOption } from '../../google/protobuf/UninterpretedOption'; -export interface EnumValueOptions { +export interface IEnumValueOptions { 'deprecated'?: (boolean); - 'uninterpretedOption'?: (_google_protobuf_UninterpretedOption)[]; + 'uninterpretedOption'?: 
(I_google_protobuf_UninterpretedOption)[]; } -export interface EnumValueOptions__Output { +export interface OEnumValueOptions { 'deprecated': (boolean); - 'uninterpretedOption': (_google_protobuf_UninterpretedOption__Output)[]; + 'uninterpretedOption': (O_google_protobuf_UninterpretedOption)[]; } diff --git a/packages/proto-loader/golden-generated/google/protobuf/FieldDescriptorProto.ts b/packages/proto-loader/golden-generated/google/protobuf/FieldDescriptorProto.ts index c511e2eff..1bcb69abe 100644 --- a/packages/proto-loader/golden-generated/google/protobuf/FieldDescriptorProto.ts +++ b/packages/proto-loader/golden-generated/google/protobuf/FieldDescriptorProto.ts @@ -1,60 +1,110 @@ // Original file: null -import type { FieldOptions as _google_protobuf_FieldOptions, FieldOptions__Output as _google_protobuf_FieldOptions__Output } from '../../google/protobuf/FieldOptions'; +import type { IFieldOptions as I_google_protobuf_FieldOptions, OFieldOptions as O_google_protobuf_FieldOptions } from '../../google/protobuf/FieldOptions'; // Original file: null -export enum _google_protobuf_FieldDescriptorProto_Label { - LABEL_OPTIONAL = 1, - LABEL_REQUIRED = 2, - LABEL_REPEATED = 3, -} +export const _google_protobuf_FieldDescriptorProto_Label = { + LABEL_OPTIONAL: 'LABEL_OPTIONAL', + LABEL_REQUIRED: 'LABEL_REQUIRED', + LABEL_REPEATED: 'LABEL_REPEATED', +} as const; + +export type I_google_protobuf_FieldDescriptorProto_Label = + | 'LABEL_OPTIONAL' + | 1 + | 'LABEL_REQUIRED' + | 2 + | 'LABEL_REPEATED' + | 3 + +export type O_google_protobuf_FieldDescriptorProto_Label = typeof _google_protobuf_FieldDescriptorProto_Label[keyof typeof _google_protobuf_FieldDescriptorProto_Label] // Original file: null -export enum _google_protobuf_FieldDescriptorProto_Type { - TYPE_DOUBLE = 1, - TYPE_FLOAT = 2, - TYPE_INT64 = 3, - TYPE_UINT64 = 4, - TYPE_INT32 = 5, - TYPE_FIXED64 = 6, - TYPE_FIXED32 = 7, - TYPE_BOOL = 8, - TYPE_STRING = 9, - TYPE_GROUP = 10, - TYPE_MESSAGE = 11, - TYPE_BYTES = 12, - TYPE_UINT32 = 13, - TYPE_ENUM = 14, - TYPE_SFIXED32 = 15, - TYPE_SFIXED64 = 16, - TYPE_SINT32 = 17, - TYPE_SINT64 = 18, -} +export const _google_protobuf_FieldDescriptorProto_Type = { + TYPE_DOUBLE: 'TYPE_DOUBLE', + TYPE_FLOAT: 'TYPE_FLOAT', + TYPE_INT64: 'TYPE_INT64', + TYPE_UINT64: 'TYPE_UINT64', + TYPE_INT32: 'TYPE_INT32', + TYPE_FIXED64: 'TYPE_FIXED64', + TYPE_FIXED32: 'TYPE_FIXED32', + TYPE_BOOL: 'TYPE_BOOL', + TYPE_STRING: 'TYPE_STRING', + TYPE_GROUP: 'TYPE_GROUP', + TYPE_MESSAGE: 'TYPE_MESSAGE', + TYPE_BYTES: 'TYPE_BYTES', + TYPE_UINT32: 'TYPE_UINT32', + TYPE_ENUM: 'TYPE_ENUM', + TYPE_SFIXED32: 'TYPE_SFIXED32', + TYPE_SFIXED64: 'TYPE_SFIXED64', + TYPE_SINT32: 'TYPE_SINT32', + TYPE_SINT64: 'TYPE_SINT64', +} as const; + +export type I_google_protobuf_FieldDescriptorProto_Type = + | 'TYPE_DOUBLE' + | 1 + | 'TYPE_FLOAT' + | 2 + | 'TYPE_INT64' + | 3 + | 'TYPE_UINT64' + | 4 + | 'TYPE_INT32' + | 5 + | 'TYPE_FIXED64' + | 6 + | 'TYPE_FIXED32' + | 7 + | 'TYPE_BOOL' + | 8 + | 'TYPE_STRING' + | 9 + | 'TYPE_GROUP' + | 10 + | 'TYPE_MESSAGE' + | 11 + | 'TYPE_BYTES' + | 12 + | 'TYPE_UINT32' + | 13 + | 'TYPE_ENUM' + | 14 + | 'TYPE_SFIXED32' + | 15 + | 'TYPE_SFIXED64' + | 16 + | 'TYPE_SINT32' + | 17 + | 'TYPE_SINT64' + | 18 + +export type O_google_protobuf_FieldDescriptorProto_Type = typeof _google_protobuf_FieldDescriptorProto_Type[keyof typeof _google_protobuf_FieldDescriptorProto_Type] -export interface FieldDescriptorProto { +export interface IFieldDescriptorProto { 'name'?: (string); 'extendee'?: (string); 'number'?: (number); - 
'label'?: (_google_protobuf_FieldDescriptorProto_Label | keyof typeof _google_protobuf_FieldDescriptorProto_Label); - 'type'?: (_google_protobuf_FieldDescriptorProto_Type | keyof typeof _google_protobuf_FieldDescriptorProto_Type); + 'label'?: (I_google_protobuf_FieldDescriptorProto_Label); + 'type'?: (I_google_protobuf_FieldDescriptorProto_Type); 'typeName'?: (string); 'defaultValue'?: (string); - 'options'?: (_google_protobuf_FieldOptions | null); + 'options'?: (I_google_protobuf_FieldOptions | null); 'oneofIndex'?: (number); 'jsonName'?: (string); } -export interface FieldDescriptorProto__Output { +export interface OFieldDescriptorProto { 'name': (string); 'extendee': (string); 'number': (number); - 'label': (keyof typeof _google_protobuf_FieldDescriptorProto_Label); - 'type': (keyof typeof _google_protobuf_FieldDescriptorProto_Type); + 'label': (O_google_protobuf_FieldDescriptorProto_Label); + 'type': (O_google_protobuf_FieldDescriptorProto_Type); 'typeName': (string); 'defaultValue': (string); - 'options': (_google_protobuf_FieldOptions__Output | null); + 'options': (O_google_protobuf_FieldOptions | null); 'oneofIndex': (number); 'jsonName': (string); } diff --git a/packages/proto-loader/golden-generated/google/protobuf/FieldOptions.ts b/packages/proto-loader/golden-generated/google/protobuf/FieldOptions.ts index 8304053f1..16e532d95 100644 --- a/packages/proto-loader/golden-generated/google/protobuf/FieldOptions.ts +++ b/packages/proto-loader/golden-generated/google/protobuf/FieldOptions.ts @@ -1,42 +1,62 @@ // Original file: null -import type { UninterpretedOption as _google_protobuf_UninterpretedOption, UninterpretedOption__Output as _google_protobuf_UninterpretedOption__Output } from '../../google/protobuf/UninterpretedOption'; -import type { FieldBehavior as _google_api_FieldBehavior } from '../../google/api/FieldBehavior'; +import type { IUninterpretedOption as I_google_protobuf_UninterpretedOption, OUninterpretedOption as O_google_protobuf_UninterpretedOption } from '../../google/protobuf/UninterpretedOption'; +import type { IFieldBehavior as I_google_api_FieldBehavior, OFieldBehavior as O_google_api_FieldBehavior } from '../../google/api/FieldBehavior'; // Original file: null -export enum _google_protobuf_FieldOptions_CType { - STRING = 0, - CORD = 1, - STRING_PIECE = 2, -} +export const _google_protobuf_FieldOptions_CType = { + STRING: 'STRING', + CORD: 'CORD', + STRING_PIECE: 'STRING_PIECE', +} as const; + +export type I_google_protobuf_FieldOptions_CType = + | 'STRING' + | 0 + | 'CORD' + | 1 + | 'STRING_PIECE' + | 2 + +export type O_google_protobuf_FieldOptions_CType = typeof _google_protobuf_FieldOptions_CType[keyof typeof _google_protobuf_FieldOptions_CType] // Original file: null -export enum _google_protobuf_FieldOptions_JSType { - JS_NORMAL = 0, - JS_STRING = 1, - JS_NUMBER = 2, -} +export const _google_protobuf_FieldOptions_JSType = { + JS_NORMAL: 'JS_NORMAL', + JS_STRING: 'JS_STRING', + JS_NUMBER: 'JS_NUMBER', +} as const; + +export type I_google_protobuf_FieldOptions_JSType = + | 'JS_NORMAL' + | 0 + | 'JS_STRING' + | 1 + | 'JS_NUMBER' + | 2 + +export type O_google_protobuf_FieldOptions_JSType = typeof _google_protobuf_FieldOptions_JSType[keyof typeof _google_protobuf_FieldOptions_JSType] -export interface FieldOptions { - 'ctype'?: (_google_protobuf_FieldOptions_CType | keyof typeof _google_protobuf_FieldOptions_CType); +export interface IFieldOptions { + 'ctype'?: (I_google_protobuf_FieldOptions_CType); 'packed'?: (boolean); 'deprecated'?: (boolean); 'lazy'?: 
(boolean); - 'jstype'?: (_google_protobuf_FieldOptions_JSType | keyof typeof _google_protobuf_FieldOptions_JSType); + 'jstype'?: (I_google_protobuf_FieldOptions_JSType); 'weak'?: (boolean); - 'uninterpretedOption'?: (_google_protobuf_UninterpretedOption)[]; - '.google.api.field_behavior'?: (_google_api_FieldBehavior | keyof typeof _google_api_FieldBehavior)[]; + 'uninterpretedOption'?: (I_google_protobuf_UninterpretedOption)[]; + '.google.api.field_behavior'?: (I_google_api_FieldBehavior)[]; } -export interface FieldOptions__Output { - 'ctype': (keyof typeof _google_protobuf_FieldOptions_CType); +export interface OFieldOptions { + 'ctype': (O_google_protobuf_FieldOptions_CType); 'packed': (boolean); 'deprecated': (boolean); 'lazy': (boolean); - 'jstype': (keyof typeof _google_protobuf_FieldOptions_JSType); + 'jstype': (O_google_protobuf_FieldOptions_JSType); 'weak': (boolean); - 'uninterpretedOption': (_google_protobuf_UninterpretedOption__Output)[]; - '.google.api.field_behavior': (keyof typeof _google_api_FieldBehavior)[]; + 'uninterpretedOption': (O_google_protobuf_UninterpretedOption)[]; + '.google.api.field_behavior': (O_google_api_FieldBehavior)[]; } diff --git a/packages/proto-loader/golden-generated/google/protobuf/FileDescriptorProto.ts b/packages/proto-loader/golden-generated/google/protobuf/FileDescriptorProto.ts index b723da7c0..c98732f9d 100644 --- a/packages/proto-loader/golden-generated/google/protobuf/FileDescriptorProto.ts +++ b/packages/proto-loader/golden-generated/google/protobuf/FileDescriptorProto.ts @@ -1,37 +1,37 @@ // Original file: null -import type { DescriptorProto as _google_protobuf_DescriptorProto, DescriptorProto__Output as _google_protobuf_DescriptorProto__Output } from '../../google/protobuf/DescriptorProto'; -import type { EnumDescriptorProto as _google_protobuf_EnumDescriptorProto, EnumDescriptorProto__Output as _google_protobuf_EnumDescriptorProto__Output } from '../../google/protobuf/EnumDescriptorProto'; -import type { ServiceDescriptorProto as _google_protobuf_ServiceDescriptorProto, ServiceDescriptorProto__Output as _google_protobuf_ServiceDescriptorProto__Output } from '../../google/protobuf/ServiceDescriptorProto'; -import type { FieldDescriptorProto as _google_protobuf_FieldDescriptorProto, FieldDescriptorProto__Output as _google_protobuf_FieldDescriptorProto__Output } from '../../google/protobuf/FieldDescriptorProto'; -import type { FileOptions as _google_protobuf_FileOptions, FileOptions__Output as _google_protobuf_FileOptions__Output } from '../../google/protobuf/FileOptions'; -import type { SourceCodeInfo as _google_protobuf_SourceCodeInfo, SourceCodeInfo__Output as _google_protobuf_SourceCodeInfo__Output } from '../../google/protobuf/SourceCodeInfo'; +import type { IDescriptorProto as I_google_protobuf_DescriptorProto, ODescriptorProto as O_google_protobuf_DescriptorProto } from '../../google/protobuf/DescriptorProto'; +import type { IEnumDescriptorProto as I_google_protobuf_EnumDescriptorProto, OEnumDescriptorProto as O_google_protobuf_EnumDescriptorProto } from '../../google/protobuf/EnumDescriptorProto'; +import type { IServiceDescriptorProto as I_google_protobuf_ServiceDescriptorProto, OServiceDescriptorProto as O_google_protobuf_ServiceDescriptorProto } from '../../google/protobuf/ServiceDescriptorProto'; +import type { IFieldDescriptorProto as I_google_protobuf_FieldDescriptorProto, OFieldDescriptorProto as O_google_protobuf_FieldDescriptorProto } from '../../google/protobuf/FieldDescriptorProto'; +import type { IFileOptions as 
I_google_protobuf_FileOptions, OFileOptions as O_google_protobuf_FileOptions } from '../../google/protobuf/FileOptions'; +import type { ISourceCodeInfo as I_google_protobuf_SourceCodeInfo, OSourceCodeInfo as O_google_protobuf_SourceCodeInfo } from '../../google/protobuf/SourceCodeInfo'; -export interface FileDescriptorProto { +export interface IFileDescriptorProto { 'name'?: (string); 'package'?: (string); 'dependency'?: (string)[]; - 'messageType'?: (_google_protobuf_DescriptorProto)[]; - 'enumType'?: (_google_protobuf_EnumDescriptorProto)[]; - 'service'?: (_google_protobuf_ServiceDescriptorProto)[]; - 'extension'?: (_google_protobuf_FieldDescriptorProto)[]; - 'options'?: (_google_protobuf_FileOptions | null); - 'sourceCodeInfo'?: (_google_protobuf_SourceCodeInfo | null); + 'messageType'?: (I_google_protobuf_DescriptorProto)[]; + 'enumType'?: (I_google_protobuf_EnumDescriptorProto)[]; + 'service'?: (I_google_protobuf_ServiceDescriptorProto)[]; + 'extension'?: (I_google_protobuf_FieldDescriptorProto)[]; + 'options'?: (I_google_protobuf_FileOptions | null); + 'sourceCodeInfo'?: (I_google_protobuf_SourceCodeInfo | null); 'publicDependency'?: (number)[]; 'weakDependency'?: (number)[]; 'syntax'?: (string); } -export interface FileDescriptorProto__Output { +export interface OFileDescriptorProto { 'name': (string); 'package': (string); 'dependency': (string)[]; - 'messageType': (_google_protobuf_DescriptorProto__Output)[]; - 'enumType': (_google_protobuf_EnumDescriptorProto__Output)[]; - 'service': (_google_protobuf_ServiceDescriptorProto__Output)[]; - 'extension': (_google_protobuf_FieldDescriptorProto__Output)[]; - 'options': (_google_protobuf_FileOptions__Output | null); - 'sourceCodeInfo': (_google_protobuf_SourceCodeInfo__Output | null); + 'messageType': (O_google_protobuf_DescriptorProto)[]; + 'enumType': (O_google_protobuf_EnumDescriptorProto)[]; + 'service': (O_google_protobuf_ServiceDescriptorProto)[]; + 'extension': (O_google_protobuf_FieldDescriptorProto)[]; + 'options': (O_google_protobuf_FileOptions | null); + 'sourceCodeInfo': (O_google_protobuf_SourceCodeInfo | null); 'publicDependency': (number)[]; 'weakDependency': (number)[]; 'syntax': (string); diff --git a/packages/proto-loader/golden-generated/google/protobuf/FileDescriptorSet.ts b/packages/proto-loader/golden-generated/google/protobuf/FileDescriptorSet.ts index 74ded2471..9c940ed5e 100644 --- a/packages/proto-loader/golden-generated/google/protobuf/FileDescriptorSet.ts +++ b/packages/proto-loader/golden-generated/google/protobuf/FileDescriptorSet.ts @@ -1,11 +1,11 @@ // Original file: null -import type { FileDescriptorProto as _google_protobuf_FileDescriptorProto, FileDescriptorProto__Output as _google_protobuf_FileDescriptorProto__Output } from '../../google/protobuf/FileDescriptorProto'; +import type { IFileDescriptorProto as I_google_protobuf_FileDescriptorProto, OFileDescriptorProto as O_google_protobuf_FileDescriptorProto } from '../../google/protobuf/FileDescriptorProto'; -export interface FileDescriptorSet { - 'file'?: (_google_protobuf_FileDescriptorProto)[]; +export interface IFileDescriptorSet { + 'file'?: (I_google_protobuf_FileDescriptorProto)[]; } -export interface FileDescriptorSet__Output { - 'file': (_google_protobuf_FileDescriptorProto__Output)[]; +export interface OFileDescriptorSet { + 'file': (O_google_protobuf_FileDescriptorProto)[]; } diff --git a/packages/proto-loader/golden-generated/google/protobuf/FileOptions.ts b/packages/proto-loader/golden-generated/google/protobuf/FileOptions.ts index 
573e847c0..c80374024 100644 --- a/packages/proto-loader/golden-generated/google/protobuf/FileOptions.ts +++ b/packages/proto-loader/golden-generated/google/protobuf/FileOptions.ts @@ -1,47 +1,63 @@ // Original file: null -import type { UninterpretedOption as _google_protobuf_UninterpretedOption, UninterpretedOption__Output as _google_protobuf_UninterpretedOption__Output } from '../../google/protobuf/UninterpretedOption'; +import type { IUninterpretedOption as I_google_protobuf_UninterpretedOption, OUninterpretedOption as O_google_protobuf_UninterpretedOption } from '../../google/protobuf/UninterpretedOption'; // Original file: null -export enum _google_protobuf_FileOptions_OptimizeMode { - SPEED = 1, - CODE_SIZE = 2, - LITE_RUNTIME = 3, -} +export const _google_protobuf_FileOptions_OptimizeMode = { + SPEED: 'SPEED', + CODE_SIZE: 'CODE_SIZE', + LITE_RUNTIME: 'LITE_RUNTIME', +} as const; + +export type I_google_protobuf_FileOptions_OptimizeMode = + | 'SPEED' + | 1 + | 'CODE_SIZE' + | 2 + | 'LITE_RUNTIME' + | 3 + +export type O_google_protobuf_FileOptions_OptimizeMode = typeof _google_protobuf_FileOptions_OptimizeMode[keyof typeof _google_protobuf_FileOptions_OptimizeMode] -export interface FileOptions { +export interface IFileOptions { 'javaPackage'?: (string); 'javaOuterClassname'?: (string); - 'optimizeFor'?: (_google_protobuf_FileOptions_OptimizeMode | keyof typeof _google_protobuf_FileOptions_OptimizeMode); + 'optimizeFor'?: (I_google_protobuf_FileOptions_OptimizeMode); 'javaMultipleFiles'?: (boolean); 'goPackage'?: (string); 'ccGenericServices'?: (boolean); 'javaGenericServices'?: (boolean); 'pyGenericServices'?: (boolean); + /** + * @deprecated + */ 'javaGenerateEqualsAndHash'?: (boolean); 'deprecated'?: (boolean); 'javaStringCheckUtf8'?: (boolean); 'ccEnableArenas'?: (boolean); 'objcClassPrefix'?: (string); 'csharpNamespace'?: (string); - 'uninterpretedOption'?: (_google_protobuf_UninterpretedOption)[]; + 'uninterpretedOption'?: (I_google_protobuf_UninterpretedOption)[]; } -export interface FileOptions__Output { +export interface OFileOptions { 'javaPackage': (string); 'javaOuterClassname': (string); - 'optimizeFor': (keyof typeof _google_protobuf_FileOptions_OptimizeMode); + 'optimizeFor': (O_google_protobuf_FileOptions_OptimizeMode); 'javaMultipleFiles': (boolean); 'goPackage': (string); 'ccGenericServices': (boolean); 'javaGenericServices': (boolean); 'pyGenericServices': (boolean); + /** + * @deprecated + */ 'javaGenerateEqualsAndHash': (boolean); 'deprecated': (boolean); 'javaStringCheckUtf8': (boolean); 'ccEnableArenas': (boolean); 'objcClassPrefix': (string); 'csharpNamespace': (string); - 'uninterpretedOption': (_google_protobuf_UninterpretedOption__Output)[]; + 'uninterpretedOption': (O_google_protobuf_UninterpretedOption)[]; } diff --git a/packages/proto-loader/golden-generated/google/protobuf/GeneratedCodeInfo.ts b/packages/proto-loader/golden-generated/google/protobuf/GeneratedCodeInfo.ts index 019fb0e15..62f9dc715 100644 --- a/packages/proto-loader/golden-generated/google/protobuf/GeneratedCodeInfo.ts +++ b/packages/proto-loader/golden-generated/google/protobuf/GeneratedCodeInfo.ts @@ -1,24 +1,24 @@ // Original file: null -export interface _google_protobuf_GeneratedCodeInfo_Annotation { +export interface I_google_protobuf_GeneratedCodeInfo_Annotation { 'path'?: (number)[]; 'sourceFile'?: (string); 'begin'?: (number); 'end'?: (number); } -export interface _google_protobuf_GeneratedCodeInfo_Annotation__Output { +export interface 
O_google_protobuf_GeneratedCodeInfo_Annotation { 'path': (number)[]; 'sourceFile': (string); 'begin': (number); 'end': (number); } -export interface GeneratedCodeInfo { - 'annotation'?: (_google_protobuf_GeneratedCodeInfo_Annotation)[]; +export interface IGeneratedCodeInfo { + 'annotation'?: (I_google_protobuf_GeneratedCodeInfo_Annotation)[]; } -export interface GeneratedCodeInfo__Output { - 'annotation': (_google_protobuf_GeneratedCodeInfo_Annotation__Output)[]; +export interface OGeneratedCodeInfo { + 'annotation': (O_google_protobuf_GeneratedCodeInfo_Annotation)[]; } diff --git a/packages/proto-loader/golden-generated/google/protobuf/MessageOptions.ts b/packages/proto-loader/golden-generated/google/protobuf/MessageOptions.ts index 31f669eb0..8c8885e63 100644 --- a/packages/proto-loader/golden-generated/google/protobuf/MessageOptions.ts +++ b/packages/proto-loader/golden-generated/google/protobuf/MessageOptions.ts @@ -1,19 +1,19 @@ // Original file: null -import type { UninterpretedOption as _google_protobuf_UninterpretedOption, UninterpretedOption__Output as _google_protobuf_UninterpretedOption__Output } from '../../google/protobuf/UninterpretedOption'; +import type { IUninterpretedOption as I_google_protobuf_UninterpretedOption, OUninterpretedOption as O_google_protobuf_UninterpretedOption } from '../../google/protobuf/UninterpretedOption'; -export interface MessageOptions { +export interface IMessageOptions { 'messageSetWireFormat'?: (boolean); 'noStandardDescriptorAccessor'?: (boolean); 'deprecated'?: (boolean); 'mapEntry'?: (boolean); - 'uninterpretedOption'?: (_google_protobuf_UninterpretedOption)[]; + 'uninterpretedOption'?: (I_google_protobuf_UninterpretedOption)[]; } -export interface MessageOptions__Output { +export interface OMessageOptions { 'messageSetWireFormat': (boolean); 'noStandardDescriptorAccessor': (boolean); 'deprecated': (boolean); 'mapEntry': (boolean); - 'uninterpretedOption': (_google_protobuf_UninterpretedOption__Output)[]; + 'uninterpretedOption': (O_google_protobuf_UninterpretedOption)[]; } diff --git a/packages/proto-loader/golden-generated/google/protobuf/MethodDescriptorProto.ts b/packages/proto-loader/golden-generated/google/protobuf/MethodDescriptorProto.ts index c76c0ea23..0826370df 100644 --- a/packages/proto-loader/golden-generated/google/protobuf/MethodDescriptorProto.ts +++ b/packages/proto-loader/golden-generated/google/protobuf/MethodDescriptorProto.ts @@ -1,21 +1,21 @@ // Original file: null -import type { MethodOptions as _google_protobuf_MethodOptions, MethodOptions__Output as _google_protobuf_MethodOptions__Output } from '../../google/protobuf/MethodOptions'; +import type { IMethodOptions as I_google_protobuf_MethodOptions, OMethodOptions as O_google_protobuf_MethodOptions } from '../../google/protobuf/MethodOptions'; -export interface MethodDescriptorProto { +export interface IMethodDescriptorProto { 'name'?: (string); 'inputType'?: (string); 'outputType'?: (string); - 'options'?: (_google_protobuf_MethodOptions | null); + 'options'?: (I_google_protobuf_MethodOptions | null); 'clientStreaming'?: (boolean); 'serverStreaming'?: (boolean); } -export interface MethodDescriptorProto__Output { +export interface OMethodDescriptorProto { 'name': (string); 'inputType': (string); 'outputType': (string); - 'options': (_google_protobuf_MethodOptions__Output | null); + 'options': (O_google_protobuf_MethodOptions | null); 'clientStreaming': (boolean); 'serverStreaming': (boolean); } diff --git 
a/packages/proto-loader/golden-generated/google/protobuf/MethodOptions.ts b/packages/proto-loader/golden-generated/google/protobuf/MethodOptions.ts index 7581b9643..5f0b69008 100644 --- a/packages/proto-loader/golden-generated/google/protobuf/MethodOptions.ts +++ b/packages/proto-loader/golden-generated/google/protobuf/MethodOptions.ts @@ -1,21 +1,21 @@ // Original file: null -import type { UninterpretedOption as _google_protobuf_UninterpretedOption, UninterpretedOption__Output as _google_protobuf_UninterpretedOption__Output } from '../../google/protobuf/UninterpretedOption'; -import type { OperationInfo as _google_longrunning_OperationInfo, OperationInfo__Output as _google_longrunning_OperationInfo__Output } from '../../google/longrunning/OperationInfo'; -import type { HttpRule as _google_api_HttpRule, HttpRule__Output as _google_api_HttpRule__Output } from '../../google/api/HttpRule'; +import type { IUninterpretedOption as I_google_protobuf_UninterpretedOption, OUninterpretedOption as O_google_protobuf_UninterpretedOption } from '../../google/protobuf/UninterpretedOption'; +import type { IOperationInfo as I_google_longrunning_OperationInfo, OOperationInfo as O_google_longrunning_OperationInfo } from '../../google/longrunning/OperationInfo'; +import type { IHttpRule as I_google_api_HttpRule, OHttpRule as O_google_api_HttpRule } from '../../google/api/HttpRule'; -export interface MethodOptions { +export interface IMethodOptions { 'deprecated'?: (boolean); - 'uninterpretedOption'?: (_google_protobuf_UninterpretedOption)[]; - '.google.longrunning.operation_info'?: (_google_longrunning_OperationInfo | null); + 'uninterpretedOption'?: (I_google_protobuf_UninterpretedOption)[]; + '.google.longrunning.operation_info'?: (I_google_longrunning_OperationInfo | null); '.google.api.method_signature'?: (string)[]; - '.google.api.http'?: (_google_api_HttpRule | null); + '.google.api.http'?: (I_google_api_HttpRule | null); } -export interface MethodOptions__Output { +export interface OMethodOptions { 'deprecated': (boolean); - 'uninterpretedOption': (_google_protobuf_UninterpretedOption__Output)[]; - '.google.longrunning.operation_info': (_google_longrunning_OperationInfo__Output | null); + 'uninterpretedOption': (O_google_protobuf_UninterpretedOption)[]; + '.google.longrunning.operation_info': (O_google_longrunning_OperationInfo | null); '.google.api.method_signature': (string)[]; - '.google.api.http': (_google_api_HttpRule__Output | null); + '.google.api.http': (O_google_api_HttpRule | null); } diff --git a/packages/proto-loader/golden-generated/google/protobuf/OneofDescriptorProto.ts b/packages/proto-loader/golden-generated/google/protobuf/OneofDescriptorProto.ts index 636f13ed4..6394270ea 100644 --- a/packages/proto-loader/golden-generated/google/protobuf/OneofDescriptorProto.ts +++ b/packages/proto-loader/golden-generated/google/protobuf/OneofDescriptorProto.ts @@ -1,13 +1,13 @@ // Original file: null -import type { OneofOptions as _google_protobuf_OneofOptions, OneofOptions__Output as _google_protobuf_OneofOptions__Output } from '../../google/protobuf/OneofOptions'; +import type { IOneofOptions as I_google_protobuf_OneofOptions, OOneofOptions as O_google_protobuf_OneofOptions } from '../../google/protobuf/OneofOptions'; -export interface OneofDescriptorProto { +export interface IOneofDescriptorProto { 'name'?: (string); - 'options'?: (_google_protobuf_OneofOptions | null); + 'options'?: (I_google_protobuf_OneofOptions | null); } -export interface OneofDescriptorProto__Output { +export interface 
OOneofDescriptorProto { 'name': (string); - 'options': (_google_protobuf_OneofOptions__Output | null); + 'options': (O_google_protobuf_OneofOptions | null); } diff --git a/packages/proto-loader/golden-generated/google/protobuf/OneofOptions.ts b/packages/proto-loader/golden-generated/google/protobuf/OneofOptions.ts index d81d34797..73280ad73 100644 --- a/packages/proto-loader/golden-generated/google/protobuf/OneofOptions.ts +++ b/packages/proto-loader/golden-generated/google/protobuf/OneofOptions.ts @@ -1,11 +1,11 @@ // Original file: null -import type { UninterpretedOption as _google_protobuf_UninterpretedOption, UninterpretedOption__Output as _google_protobuf_UninterpretedOption__Output } from '../../google/protobuf/UninterpretedOption'; +import type { IUninterpretedOption as I_google_protobuf_UninterpretedOption, OUninterpretedOption as O_google_protobuf_UninterpretedOption } from '../../google/protobuf/UninterpretedOption'; -export interface OneofOptions { - 'uninterpretedOption'?: (_google_protobuf_UninterpretedOption)[]; +export interface IOneofOptions { + 'uninterpretedOption'?: (I_google_protobuf_UninterpretedOption)[]; } -export interface OneofOptions__Output { - 'uninterpretedOption': (_google_protobuf_UninterpretedOption__Output)[]; +export interface OOneofOptions { + 'uninterpretedOption': (O_google_protobuf_UninterpretedOption)[]; } diff --git a/packages/proto-loader/golden-generated/google/protobuf/ServiceDescriptorProto.ts b/packages/proto-loader/golden-generated/google/protobuf/ServiceDescriptorProto.ts index 40c9263ea..a0427fda5 100644 --- a/packages/proto-loader/golden-generated/google/protobuf/ServiceDescriptorProto.ts +++ b/packages/proto-loader/golden-generated/google/protobuf/ServiceDescriptorProto.ts @@ -1,16 +1,16 @@ // Original file: null -import type { MethodDescriptorProto as _google_protobuf_MethodDescriptorProto, MethodDescriptorProto__Output as _google_protobuf_MethodDescriptorProto__Output } from '../../google/protobuf/MethodDescriptorProto'; -import type { ServiceOptions as _google_protobuf_ServiceOptions, ServiceOptions__Output as _google_protobuf_ServiceOptions__Output } from '../../google/protobuf/ServiceOptions'; +import type { IMethodDescriptorProto as I_google_protobuf_MethodDescriptorProto, OMethodDescriptorProto as O_google_protobuf_MethodDescriptorProto } from '../../google/protobuf/MethodDescriptorProto'; +import type { IServiceOptions as I_google_protobuf_ServiceOptions, OServiceOptions as O_google_protobuf_ServiceOptions } from '../../google/protobuf/ServiceOptions'; -export interface ServiceDescriptorProto { +export interface IServiceDescriptorProto { 'name'?: (string); - 'method'?: (_google_protobuf_MethodDescriptorProto)[]; - 'options'?: (_google_protobuf_ServiceOptions | null); + 'method'?: (I_google_protobuf_MethodDescriptorProto)[]; + 'options'?: (I_google_protobuf_ServiceOptions | null); } -export interface ServiceDescriptorProto__Output { +export interface OServiceDescriptorProto { 'name': (string); - 'method': (_google_protobuf_MethodDescriptorProto__Output)[]; - 'options': (_google_protobuf_ServiceOptions__Output | null); + 'method': (O_google_protobuf_MethodDescriptorProto)[]; + 'options': (O_google_protobuf_ServiceOptions | null); } diff --git a/packages/proto-loader/golden-generated/google/protobuf/ServiceOptions.ts b/packages/proto-loader/golden-generated/google/protobuf/ServiceOptions.ts index c0522eca3..0ddc8e187 100644 --- a/packages/proto-loader/golden-generated/google/protobuf/ServiceOptions.ts +++ 
b/packages/proto-loader/golden-generated/google/protobuf/ServiceOptions.ts @@ -1,17 +1,17 @@ // Original file: null -import type { UninterpretedOption as _google_protobuf_UninterpretedOption, UninterpretedOption__Output as _google_protobuf_UninterpretedOption__Output } from '../../google/protobuf/UninterpretedOption'; +import type { IUninterpretedOption as I_google_protobuf_UninterpretedOption, OUninterpretedOption as O_google_protobuf_UninterpretedOption } from '../../google/protobuf/UninterpretedOption'; -export interface ServiceOptions { +export interface IServiceOptions { 'deprecated'?: (boolean); - 'uninterpretedOption'?: (_google_protobuf_UninterpretedOption)[]; + 'uninterpretedOption'?: (I_google_protobuf_UninterpretedOption)[]; '.google.api.default_host'?: (string); '.google.api.oauth_scopes'?: (string); } -export interface ServiceOptions__Output { +export interface OServiceOptions { 'deprecated': (boolean); - 'uninterpretedOption': (_google_protobuf_UninterpretedOption__Output)[]; + 'uninterpretedOption': (O_google_protobuf_UninterpretedOption)[]; '.google.api.default_host': (string); '.google.api.oauth_scopes': (string); } diff --git a/packages/proto-loader/golden-generated/google/protobuf/SourceCodeInfo.ts b/packages/proto-loader/golden-generated/google/protobuf/SourceCodeInfo.ts index d30e59b4f..4d0856604 100644 --- a/packages/proto-loader/golden-generated/google/protobuf/SourceCodeInfo.ts +++ b/packages/proto-loader/golden-generated/google/protobuf/SourceCodeInfo.ts @@ -1,7 +1,7 @@ // Original file: null -export interface _google_protobuf_SourceCodeInfo_Location { +export interface I_google_protobuf_SourceCodeInfo_Location { 'path'?: (number)[]; 'span'?: (number)[]; 'leadingComments'?: (string); @@ -9,7 +9,7 @@ export interface _google_protobuf_SourceCodeInfo_Location { 'leadingDetachedComments'?: (string)[]; } -export interface _google_protobuf_SourceCodeInfo_Location__Output { +export interface O_google_protobuf_SourceCodeInfo_Location { 'path': (number)[]; 'span': (number)[]; 'leadingComments': (string); @@ -17,10 +17,10 @@ export interface _google_protobuf_SourceCodeInfo_Location__Output { 'leadingDetachedComments': (string)[]; } -export interface SourceCodeInfo { - 'location'?: (_google_protobuf_SourceCodeInfo_Location)[]; +export interface ISourceCodeInfo { + 'location'?: (I_google_protobuf_SourceCodeInfo_Location)[]; } -export interface SourceCodeInfo__Output { - 'location': (_google_protobuf_SourceCodeInfo_Location__Output)[]; +export interface OSourceCodeInfo { + 'location': (O_google_protobuf_SourceCodeInfo_Location)[]; } diff --git a/packages/proto-loader/golden-generated/google/protobuf/Timestamp.ts b/packages/proto-loader/golden-generated/google/protobuf/Timestamp.ts index ceaa32b5f..06d756134 100644 --- a/packages/proto-loader/golden-generated/google/protobuf/Timestamp.ts +++ b/packages/proto-loader/golden-generated/google/protobuf/Timestamp.ts @@ -2,12 +2,12 @@ import type { Long } from '@grpc/proto-loader'; -export interface Timestamp { +export interface ITimestamp { 'seconds'?: (number | string | Long); 'nanos'?: (number); } -export interface Timestamp__Output { +export interface OTimestamp { 'seconds': (string); 'nanos': (number); } diff --git a/packages/proto-loader/golden-generated/google/protobuf/UninterpretedOption.ts b/packages/proto-loader/golden-generated/google/protobuf/UninterpretedOption.ts index 433820f55..fa0feaf52 100644 --- a/packages/proto-loader/golden-generated/google/protobuf/UninterpretedOption.ts +++ 
b/packages/proto-loader/golden-generated/google/protobuf/UninterpretedOption.ts @@ -2,18 +2,18 @@ import type { Long } from '@grpc/proto-loader'; -export interface _google_protobuf_UninterpretedOption_NamePart { +export interface I_google_protobuf_UninterpretedOption_NamePart { 'namePart'?: (string); 'isExtension'?: (boolean); } -export interface _google_protobuf_UninterpretedOption_NamePart__Output { +export interface O_google_protobuf_UninterpretedOption_NamePart { 'namePart': (string); 'isExtension': (boolean); } -export interface UninterpretedOption { - 'name'?: (_google_protobuf_UninterpretedOption_NamePart)[]; +export interface IUninterpretedOption { + 'name'?: (I_google_protobuf_UninterpretedOption_NamePart)[]; 'identifierValue'?: (string); 'positiveIntValue'?: (number | string | Long); 'negativeIntValue'?: (number | string | Long); @@ -22,8 +22,8 @@ export interface UninterpretedOption { 'aggregateValue'?: (string); } -export interface UninterpretedOption__Output { - 'name': (_google_protobuf_UninterpretedOption_NamePart__Output)[]; +export interface OUninterpretedOption { + 'name': (O_google_protobuf_UninterpretedOption_NamePart)[]; 'identifierValue': (string); 'positiveIntValue': (string); 'negativeIntValue': (string); diff --git a/packages/proto-loader/golden-generated/google/rpc/Status.ts b/packages/proto-loader/golden-generated/google/rpc/Status.ts index 4ce45b6a9..05cf71c5f 100644 --- a/packages/proto-loader/golden-generated/google/rpc/Status.ts +++ b/packages/proto-loader/golden-generated/google/rpc/Status.ts @@ -1,6 +1,6 @@ // Original file: deps/googleapis/google/rpc/status.proto -import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__Output } from '../../google/protobuf/Any'; +import type { IAny as I_google_protobuf_Any, OAny as O_google_protobuf_Any } from '../../google/protobuf/Any'; /** * The `Status` type defines a logical error model that is suitable for @@ -11,7 +11,7 @@ import type { Any as _google_protobuf_Any, Any__Output as _google_protobuf_Any__ * You can find out more about this error model and how to work with it in the * [API Design Guide](https://cloud.google.com/apis/design/errors). */ -export interface Status { +export interface IStatus { /** * The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. */ @@ -26,7 +26,7 @@ export interface Status { * A list of messages that carry the error details. There is a common set of * message types for APIs to use. */ - 'details'?: (_google_protobuf_Any)[]; + 'details'?: (I_google_protobuf_Any)[]; } /** @@ -38,7 +38,7 @@ export interface Status { * You can find out more about this error model and how to work with it in the * [API Design Guide](https://cloud.google.com/apis/design/errors). */ -export interface Status__Output { +export interface OStatus { /** * The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. */ @@ -53,5 +53,5 @@ export interface Status__Output { * A list of messages that carry the error details. There is a common set of * message types for APIs to use. 
*/ - 'details': (_google_protobuf_Any__Output)[]; + 'details': (O_google_protobuf_Any)[]; } diff --git a/packages/proto-loader/golden-generated/google/showcase/v1beta1/BlockRequest.ts b/packages/proto-loader/golden-generated/google/showcase/v1beta1/BlockRequest.ts index 383c409c5..29d10f6dd 100644 --- a/packages/proto-loader/golden-generated/google/showcase/v1beta1/BlockRequest.ts +++ b/packages/proto-loader/golden-generated/google/showcase/v1beta1/BlockRequest.ts @@ -1,45 +1,45 @@ // Original file: deps/gapic-showcase/schema/google/showcase/v1beta1/echo.proto -import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../google/protobuf/Duration'; -import type { Status as _google_rpc_Status, Status__Output as _google_rpc_Status__Output } from '../../../google/rpc/Status'; -import type { BlockResponse as _google_showcase_v1beta1_BlockResponse, BlockResponse__Output as _google_showcase_v1beta1_BlockResponse__Output } from '../../../google/showcase/v1beta1/BlockResponse'; +import type { IDuration as I_google_protobuf_Duration, ODuration as O_google_protobuf_Duration } from '../../../google/protobuf/Duration'; +import type { IStatus as I_google_rpc_Status, OStatus as O_google_rpc_Status } from '../../../google/rpc/Status'; +import type { IBlockResponse as I_google_showcase_v1beta1_BlockResponse, OBlockResponse as O_google_showcase_v1beta1_BlockResponse } from '../../../google/showcase/v1beta1/BlockResponse'; /** * The request for Block method. */ -export interface BlockRequest { +export interface IBlockRequest { /** * The amount of time to block before returning a response. */ - 'response_delay'?: (_google_protobuf_Duration | null); + 'response_delay'?: (I_google_protobuf_Duration | null); /** * The error that will be returned by the server. If this code is specified * to be the OK rpc code, an empty response will be returned. */ - 'error'?: (_google_rpc_Status | null); + 'error'?: (I_google_rpc_Status | null); /** * The response to be returned that will signify successful method call. */ - 'success'?: (_google_showcase_v1beta1_BlockResponse | null); + 'success'?: (I_google_showcase_v1beta1_BlockResponse | null); 'response'?: "error"|"success"; } /** * The request for Block method. */ -export interface BlockRequest__Output { +export interface OBlockRequest { /** * The amount of time to block before returning a response. */ - 'response_delay': (_google_protobuf_Duration__Output | null); + 'response_delay': (O_google_protobuf_Duration | null); /** * The error that will be returned by the server. If this code is specified * to be the OK rpc code, an empty response will be returned. */ - 'error'?: (_google_rpc_Status__Output | null); + 'error'?: (O_google_rpc_Status | null); /** * The response to be returned that will signify successful method call. */ - 'success'?: (_google_showcase_v1beta1_BlockResponse__Output | null); + 'success'?: (O_google_showcase_v1beta1_BlockResponse | null); 'response': "error"|"success"; } diff --git a/packages/proto-loader/golden-generated/google/showcase/v1beta1/BlockResponse.ts b/packages/proto-loader/golden-generated/google/showcase/v1beta1/BlockResponse.ts index 5634b19d4..3bb9bddf2 100644 --- a/packages/proto-loader/golden-generated/google/showcase/v1beta1/BlockResponse.ts +++ b/packages/proto-loader/golden-generated/google/showcase/v1beta1/BlockResponse.ts @@ -4,7 +4,7 @@ /** * The response for Block method. 
*/ -export interface BlockResponse { +export interface IBlockResponse { /** * This content can contain anything, the server will not depend on a value * here. @@ -15,7 +15,7 @@ export interface BlockResponse { /** * The response for Block method. */ -export interface BlockResponse__Output { +export interface OBlockResponse { /** * This content can contain anything, the server will not depend on a value * here. diff --git a/packages/proto-loader/golden-generated/google/showcase/v1beta1/Echo.ts b/packages/proto-loader/golden-generated/google/showcase/v1beta1/Echo.ts index acb911270..a0330fe68 100644 --- a/packages/proto-loader/golden-generated/google/showcase/v1beta1/Echo.ts +++ b/packages/proto-loader/golden-generated/google/showcase/v1beta1/Echo.ts @@ -2,15 +2,15 @@ import type * as grpc from '@grpc/grpc-js' import type { MethodDefinition } from '@grpc/proto-loader' -import type { BlockRequest as _google_showcase_v1beta1_BlockRequest, BlockRequest__Output as _google_showcase_v1beta1_BlockRequest__Output } from '../../../google/showcase/v1beta1/BlockRequest'; -import type { BlockResponse as _google_showcase_v1beta1_BlockResponse, BlockResponse__Output as _google_showcase_v1beta1_BlockResponse__Output } from '../../../google/showcase/v1beta1/BlockResponse'; -import type { EchoRequest as _google_showcase_v1beta1_EchoRequest, EchoRequest__Output as _google_showcase_v1beta1_EchoRequest__Output } from '../../../google/showcase/v1beta1/EchoRequest'; -import type { EchoResponse as _google_showcase_v1beta1_EchoResponse, EchoResponse__Output as _google_showcase_v1beta1_EchoResponse__Output } from '../../../google/showcase/v1beta1/EchoResponse'; -import type { ExpandRequest as _google_showcase_v1beta1_ExpandRequest, ExpandRequest__Output as _google_showcase_v1beta1_ExpandRequest__Output } from '../../../google/showcase/v1beta1/ExpandRequest'; -import type { Operation as _google_longrunning_Operation, Operation__Output as _google_longrunning_Operation__Output } from '../../../google/longrunning/Operation'; -import type { PagedExpandRequest as _google_showcase_v1beta1_PagedExpandRequest, PagedExpandRequest__Output as _google_showcase_v1beta1_PagedExpandRequest__Output } from '../../../google/showcase/v1beta1/PagedExpandRequest'; -import type { PagedExpandResponse as _google_showcase_v1beta1_PagedExpandResponse, PagedExpandResponse__Output as _google_showcase_v1beta1_PagedExpandResponse__Output } from '../../../google/showcase/v1beta1/PagedExpandResponse'; -import type { WaitRequest as _google_showcase_v1beta1_WaitRequest, WaitRequest__Output as _google_showcase_v1beta1_WaitRequest__Output } from '../../../google/showcase/v1beta1/WaitRequest'; +import type { IBlockRequest as I_google_showcase_v1beta1_BlockRequest, OBlockRequest as O_google_showcase_v1beta1_BlockRequest } from '../../../google/showcase/v1beta1/BlockRequest'; +import type { IBlockResponse as I_google_showcase_v1beta1_BlockResponse, OBlockResponse as O_google_showcase_v1beta1_BlockResponse } from '../../../google/showcase/v1beta1/BlockResponse'; +import type { IEchoRequest as I_google_showcase_v1beta1_EchoRequest, OEchoRequest as O_google_showcase_v1beta1_EchoRequest } from '../../../google/showcase/v1beta1/EchoRequest'; +import type { IEchoResponse as I_google_showcase_v1beta1_EchoResponse, OEchoResponse as O_google_showcase_v1beta1_EchoResponse } from '../../../google/showcase/v1beta1/EchoResponse'; +import type { IExpandRequest as I_google_showcase_v1beta1_ExpandRequest, OExpandRequest as O_google_showcase_v1beta1_ExpandRequest } from 
'../../../google/showcase/v1beta1/ExpandRequest'; +import type { IOperation as I_google_longrunning_Operation, OOperation as O_google_longrunning_Operation } from '../../../google/longrunning/Operation'; +import type { IPagedExpandRequest as I_google_showcase_v1beta1_PagedExpandRequest, OPagedExpandRequest as O_google_showcase_v1beta1_PagedExpandRequest } from '../../../google/showcase/v1beta1/PagedExpandRequest'; +import type { IPagedExpandResponse as I_google_showcase_v1beta1_PagedExpandResponse, OPagedExpandResponse as O_google_showcase_v1beta1_PagedExpandResponse } from '../../../google/showcase/v1beta1/PagedExpandResponse'; +import type { IWaitRequest as I_google_showcase_v1beta1_WaitRequest, OWaitRequest as O_google_showcase_v1beta1_WaitRequest } from '../../../google/showcase/v1beta1/WaitRequest'; /** * This service is used showcase the four main types of rpcs - unary, server @@ -25,115 +25,115 @@ export interface EchoClient extends grpc.Client { * and then return the response or error. * This method showcases how a client handles delays or retries. */ - Block(argument: _google_showcase_v1beta1_BlockRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_showcase_v1beta1_BlockResponse__Output) => void): grpc.ClientUnaryCall; - Block(argument: _google_showcase_v1beta1_BlockRequest, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _google_showcase_v1beta1_BlockResponse__Output) => void): grpc.ClientUnaryCall; - Block(argument: _google_showcase_v1beta1_BlockRequest, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_showcase_v1beta1_BlockResponse__Output) => void): grpc.ClientUnaryCall; - Block(argument: _google_showcase_v1beta1_BlockRequest, callback: (error?: grpc.ServiceError, result?: _google_showcase_v1beta1_BlockResponse__Output) => void): grpc.ClientUnaryCall; + Block(argument: I_google_showcase_v1beta1_BlockRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<O_google_showcase_v1beta1_BlockResponse>): grpc.ClientUnaryCall; + Block(argument: I_google_showcase_v1beta1_BlockRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<O_google_showcase_v1beta1_BlockResponse>): grpc.ClientUnaryCall; + Block(argument: I_google_showcase_v1beta1_BlockRequest, options: grpc.CallOptions, callback: grpc.requestCallback<O_google_showcase_v1beta1_BlockResponse>): grpc.ClientUnaryCall; + Block(argument: I_google_showcase_v1beta1_BlockRequest, callback: grpc.requestCallback<O_google_showcase_v1beta1_BlockResponse>): grpc.ClientUnaryCall; /** * This method will block (wait) for the requested amount of time * and then return the response or error. * This method showcases how a client handles delays or retries. 
*/ - block(argument: _google_showcase_v1beta1_BlockRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_showcase_v1beta1_BlockResponse__Output) => void): grpc.ClientUnaryCall; - block(argument: _google_showcase_v1beta1_BlockRequest, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _google_showcase_v1beta1_BlockResponse__Output) => void): grpc.ClientUnaryCall; - block(argument: _google_showcase_v1beta1_BlockRequest, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_showcase_v1beta1_BlockResponse__Output) => void): grpc.ClientUnaryCall; - block(argument: _google_showcase_v1beta1_BlockRequest, callback: (error?: grpc.ServiceError, result?: _google_showcase_v1beta1_BlockResponse__Output) => void): grpc.ClientUnaryCall; + block(argument: I_google_showcase_v1beta1_BlockRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<O_google_showcase_v1beta1_BlockResponse>): grpc.ClientUnaryCall; + block(argument: I_google_showcase_v1beta1_BlockRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<O_google_showcase_v1beta1_BlockResponse>): grpc.ClientUnaryCall; + block(argument: I_google_showcase_v1beta1_BlockRequest, options: grpc.CallOptions, callback: grpc.requestCallback<O_google_showcase_v1beta1_BlockResponse>): grpc.ClientUnaryCall; + block(argument: I_google_showcase_v1beta1_BlockRequest, callback: grpc.requestCallback<O_google_showcase_v1beta1_BlockResponse>): grpc.ClientUnaryCall; /** * This method, upon receiving a request on the stream, the same content will * be passed back on the stream. This method showcases bidirectional * streaming rpcs. */ - Chat(metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientDuplexStream<_google_showcase_v1beta1_EchoRequest, _google_showcase_v1beta1_EchoResponse__Output>; - Chat(options?: grpc.CallOptions): grpc.ClientDuplexStream<_google_showcase_v1beta1_EchoRequest, _google_showcase_v1beta1_EchoResponse__Output>; + Chat(metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientDuplexStream<I_google_showcase_v1beta1_EchoRequest, O_google_showcase_v1beta1_EchoResponse>; + Chat(options?: grpc.CallOptions): grpc.ClientDuplexStream<I_google_showcase_v1beta1_EchoRequest, O_google_showcase_v1beta1_EchoResponse>; /** * This method, upon receiving a request on the stream, the same content will * be passed back on the stream. This method showcases bidirectional * streaming rpcs. */ - chat(metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientDuplexStream<_google_showcase_v1beta1_EchoRequest, _google_showcase_v1beta1_EchoResponse__Output>; - chat(options?: grpc.CallOptions): grpc.ClientDuplexStream<_google_showcase_v1beta1_EchoRequest, _google_showcase_v1beta1_EchoResponse__Output>; + chat(metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientDuplexStream<I_google_showcase_v1beta1_EchoRequest, O_google_showcase_v1beta1_EchoResponse>; + chat(options?: grpc.CallOptions): grpc.ClientDuplexStream<I_google_showcase_v1beta1_EchoRequest, O_google_showcase_v1beta1_EchoResponse>; /** * This method will collect the words given to it. When the stream is closed * by the client, this method will return the a concatenation of the strings * passed to it. This method showcases client-side streaming rpcs. 
*/ - Collect(metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_showcase_v1beta1_EchoResponse__Output) => void): grpc.ClientWritableStream<_google_showcase_v1beta1_EchoRequest>; - Collect(metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _google_showcase_v1beta1_EchoResponse__Output) => void): grpc.ClientWritableStream<_google_showcase_v1beta1_EchoRequest>; - Collect(options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_showcase_v1beta1_EchoResponse__Output) => void): grpc.ClientWritableStream<_google_showcase_v1beta1_EchoRequest>; - Collect(callback: (error?: grpc.ServiceError, result?: _google_showcase_v1beta1_EchoResponse__Output) => void): grpc.ClientWritableStream<_google_showcase_v1beta1_EchoRequest>; + Collect(metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<O_google_showcase_v1beta1_EchoResponse>): grpc.ClientWritableStream<I_google_showcase_v1beta1_EchoRequest>; + Collect(metadata: grpc.Metadata, callback: grpc.requestCallback<O_google_showcase_v1beta1_EchoResponse>): grpc.ClientWritableStream<I_google_showcase_v1beta1_EchoRequest>; + Collect(options: grpc.CallOptions, callback: grpc.requestCallback<O_google_showcase_v1beta1_EchoResponse>): grpc.ClientWritableStream<I_google_showcase_v1beta1_EchoRequest>; + Collect(callback: grpc.requestCallback<O_google_showcase_v1beta1_EchoResponse>): grpc.ClientWritableStream<I_google_showcase_v1beta1_EchoRequest>; /** * This method will collect the words given to it. When the stream is closed * by the client, this method will return the a concatenation of the strings * passed to it. This method showcases client-side streaming rpcs. */ - collect(metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_showcase_v1beta1_EchoResponse__Output) => void): grpc.ClientWritableStream<_google_showcase_v1beta1_EchoRequest>; - collect(metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _google_showcase_v1beta1_EchoResponse__Output) => void): grpc.ClientWritableStream<_google_showcase_v1beta1_EchoRequest>; - collect(options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_showcase_v1beta1_EchoResponse__Output) => void): grpc.ClientWritableStream<_google_showcase_v1beta1_EchoRequest>; - collect(callback: (error?: grpc.ServiceError, result?: _google_showcase_v1beta1_EchoResponse__Output) => void): grpc.ClientWritableStream<_google_showcase_v1beta1_EchoRequest>; + collect(metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<O_google_showcase_v1beta1_EchoResponse>): grpc.ClientWritableStream<I_google_showcase_v1beta1_EchoRequest>; + collect(metadata: grpc.Metadata, callback: grpc.requestCallback<O_google_showcase_v1beta1_EchoResponse>): grpc.ClientWritableStream<I_google_showcase_v1beta1_EchoRequest>; + collect(options: grpc.CallOptions, callback: grpc.requestCallback<O_google_showcase_v1beta1_EchoResponse>): grpc.ClientWritableStream<I_google_showcase_v1beta1_EchoRequest>; + collect(callback: grpc.requestCallback<O_google_showcase_v1beta1_EchoResponse>): grpc.ClientWritableStream<I_google_showcase_v1beta1_EchoRequest>; /** * This method simply echos the request. This method is showcases unary rpcs. 
*/ - Echo(argument: _google_showcase_v1beta1_EchoRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_showcase_v1beta1_EchoResponse__Output) => void): grpc.ClientUnaryCall; - Echo(argument: _google_showcase_v1beta1_EchoRequest, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _google_showcase_v1beta1_EchoResponse__Output) => void): grpc.ClientUnaryCall; - Echo(argument: _google_showcase_v1beta1_EchoRequest, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_showcase_v1beta1_EchoResponse__Output) => void): grpc.ClientUnaryCall; - Echo(argument: _google_showcase_v1beta1_EchoRequest, callback: (error?: grpc.ServiceError, result?: _google_showcase_v1beta1_EchoResponse__Output) => void): grpc.ClientUnaryCall; + Echo(argument: I_google_showcase_v1beta1_EchoRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<O_google_showcase_v1beta1_EchoResponse>): grpc.ClientUnaryCall; + Echo(argument: I_google_showcase_v1beta1_EchoRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<O_google_showcase_v1beta1_EchoResponse>): grpc.ClientUnaryCall; + Echo(argument: I_google_showcase_v1beta1_EchoRequest, options: grpc.CallOptions, callback: grpc.requestCallback<O_google_showcase_v1beta1_EchoResponse>): grpc.ClientUnaryCall; + Echo(argument: I_google_showcase_v1beta1_EchoRequest, callback: grpc.requestCallback<O_google_showcase_v1beta1_EchoResponse>): grpc.ClientUnaryCall; /** * This method simply echos the request. This method is showcases unary rpcs. */ - echo(argument: _google_showcase_v1beta1_EchoRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_showcase_v1beta1_EchoResponse__Output) => void): grpc.ClientUnaryCall; - echo(argument: _google_showcase_v1beta1_EchoRequest, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _google_showcase_v1beta1_EchoResponse__Output) => void): grpc.ClientUnaryCall; - echo(argument: _google_showcase_v1beta1_EchoRequest, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_showcase_v1beta1_EchoResponse__Output) => void): grpc.ClientUnaryCall; - echo(argument: _google_showcase_v1beta1_EchoRequest, callback: (error?: grpc.ServiceError, result?: _google_showcase_v1beta1_EchoResponse__Output) => void): grpc.ClientUnaryCall; + echo(argument: I_google_showcase_v1beta1_EchoRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<O_google_showcase_v1beta1_EchoResponse>): grpc.ClientUnaryCall; + echo(argument: I_google_showcase_v1beta1_EchoRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<O_google_showcase_v1beta1_EchoResponse>): grpc.ClientUnaryCall; + echo(argument: I_google_showcase_v1beta1_EchoRequest, options: grpc.CallOptions, callback: grpc.requestCallback<O_google_showcase_v1beta1_EchoResponse>): grpc.ClientUnaryCall; + echo(argument: I_google_showcase_v1beta1_EchoRequest, callback: grpc.requestCallback<O_google_showcase_v1beta1_EchoResponse>): grpc.ClientUnaryCall; /** * This method split the given content into words and will pass each word back * through the stream. This method showcases server-side streaming rpcs. 
*/ - Expand(argument: _google_showcase_v1beta1_ExpandRequest, metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientReadableStream<_google_showcase_v1beta1_EchoResponse__Output>; - Expand(argument: _google_showcase_v1beta1_ExpandRequest, options?: grpc.CallOptions): grpc.ClientReadableStream<_google_showcase_v1beta1_EchoResponse__Output>; + Expand(argument: I_google_showcase_v1beta1_ExpandRequest, metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientReadableStream<O_google_showcase_v1beta1_EchoResponse>; + Expand(argument: I_google_showcase_v1beta1_ExpandRequest, options?: grpc.CallOptions): grpc.ClientReadableStream<O_google_showcase_v1beta1_EchoResponse>; /** * This method split the given content into words and will pass each word back * through the stream. This method showcases server-side streaming rpcs. */ - expand(argument: _google_showcase_v1beta1_ExpandRequest, metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientReadableStream<_google_showcase_v1beta1_EchoResponse__Output>; - expand(argument: _google_showcase_v1beta1_ExpandRequest, options?: grpc.CallOptions): grpc.ClientReadableStream<_google_showcase_v1beta1_EchoResponse__Output>; + expand(argument: I_google_showcase_v1beta1_ExpandRequest, metadata: grpc.Metadata, options?: grpc.CallOptions): grpc.ClientReadableStream<O_google_showcase_v1beta1_EchoResponse>; + expand(argument: I_google_showcase_v1beta1_ExpandRequest, options?: grpc.CallOptions): grpc.ClientReadableStream<O_google_showcase_v1beta1_EchoResponse>; /** * This is similar to the Expand method but instead of returning a stream of * expanded words, this method returns a paged list of expanded words. */ - PagedExpand(argument: _google_showcase_v1beta1_PagedExpandRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_showcase_v1beta1_PagedExpandResponse__Output) => void): grpc.ClientUnaryCall; - PagedExpand(argument: _google_showcase_v1beta1_PagedExpandRequest, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _google_showcase_v1beta1_PagedExpandResponse__Output) => void): grpc.ClientUnaryCall; - PagedExpand(argument: _google_showcase_v1beta1_PagedExpandRequest, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_showcase_v1beta1_PagedExpandResponse__Output) => void): grpc.ClientUnaryCall; - PagedExpand(argument: _google_showcase_v1beta1_PagedExpandRequest, callback: (error?: grpc.ServiceError, result?: _google_showcase_v1beta1_PagedExpandResponse__Output) => void): grpc.ClientUnaryCall; + PagedExpand(argument: I_google_showcase_v1beta1_PagedExpandRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<O_google_showcase_v1beta1_PagedExpandResponse>): grpc.ClientUnaryCall; + PagedExpand(argument: I_google_showcase_v1beta1_PagedExpandRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<O_google_showcase_v1beta1_PagedExpandResponse>): grpc.ClientUnaryCall; + PagedExpand(argument: I_google_showcase_v1beta1_PagedExpandRequest, options: grpc.CallOptions, callback: grpc.requestCallback<O_google_showcase_v1beta1_PagedExpandResponse>): grpc.ClientUnaryCall; + PagedExpand(argument: I_google_showcase_v1beta1_PagedExpandRequest, callback: grpc.requestCallback<O_google_showcase_v1beta1_PagedExpandResponse>): grpc.ClientUnaryCall; /** * This is similar to the Expand method but instead of returning a stream of * expanded words, this method returns a paged list of expanded words. 
*/ - pagedExpand(argument: _google_showcase_v1beta1_PagedExpandRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_showcase_v1beta1_PagedExpandResponse__Output) => void): grpc.ClientUnaryCall; - pagedExpand(argument: _google_showcase_v1beta1_PagedExpandRequest, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _google_showcase_v1beta1_PagedExpandResponse__Output) => void): grpc.ClientUnaryCall; - pagedExpand(argument: _google_showcase_v1beta1_PagedExpandRequest, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_showcase_v1beta1_PagedExpandResponse__Output) => void): grpc.ClientUnaryCall; - pagedExpand(argument: _google_showcase_v1beta1_PagedExpandRequest, callback: (error?: grpc.ServiceError, result?: _google_showcase_v1beta1_PagedExpandResponse__Output) => void): grpc.ClientUnaryCall; + pagedExpand(argument: I_google_showcase_v1beta1_PagedExpandRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<O_google_showcase_v1beta1_PagedExpandResponse>): grpc.ClientUnaryCall; + pagedExpand(argument: I_google_showcase_v1beta1_PagedExpandRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<O_google_showcase_v1beta1_PagedExpandResponse>): grpc.ClientUnaryCall; + pagedExpand(argument: I_google_showcase_v1beta1_PagedExpandRequest, options: grpc.CallOptions, callback: grpc.requestCallback<O_google_showcase_v1beta1_PagedExpandResponse>): grpc.ClientUnaryCall; + pagedExpand(argument: I_google_showcase_v1beta1_PagedExpandRequest, callback: grpc.requestCallback<O_google_showcase_v1beta1_PagedExpandResponse>): grpc.ClientUnaryCall; /** * This method will wait the requested amount of and then return. * This method showcases how a client handles a request timing out. */ - Wait(argument: _google_showcase_v1beta1_WaitRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_longrunning_Operation__Output) => void): grpc.ClientUnaryCall; - Wait(argument: _google_showcase_v1beta1_WaitRequest, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _google_longrunning_Operation__Output) => void): grpc.ClientUnaryCall; - Wait(argument: _google_showcase_v1beta1_WaitRequest, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_longrunning_Operation__Output) => void): grpc.ClientUnaryCall; - Wait(argument: _google_showcase_v1beta1_WaitRequest, callback: (error?: grpc.ServiceError, result?: _google_longrunning_Operation__Output) => void): grpc.ClientUnaryCall; + Wait(argument: I_google_showcase_v1beta1_WaitRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<O_google_longrunning_Operation>): grpc.ClientUnaryCall; + Wait(argument: I_google_showcase_v1beta1_WaitRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<O_google_longrunning_Operation>): grpc.ClientUnaryCall; + Wait(argument: I_google_showcase_v1beta1_WaitRequest, options: grpc.CallOptions, callback: grpc.requestCallback<O_google_longrunning_Operation>): grpc.ClientUnaryCall; + Wait(argument: I_google_showcase_v1beta1_WaitRequest, callback: grpc.requestCallback<O_google_longrunning_Operation>): grpc.ClientUnaryCall; /** * This method will wait the requested amount of and then return. * This method showcases how a client handles a request timing out. 
*/ - wait(argument: _google_showcase_v1beta1_WaitRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_longrunning_Operation__Output) => void): grpc.ClientUnaryCall; - wait(argument: _google_showcase_v1beta1_WaitRequest, metadata: grpc.Metadata, callback: (error?: grpc.ServiceError, result?: _google_longrunning_Operation__Output) => void): grpc.ClientUnaryCall; - wait(argument: _google_showcase_v1beta1_WaitRequest, options: grpc.CallOptions, callback: (error?: grpc.ServiceError, result?: _google_longrunning_Operation__Output) => void): grpc.ClientUnaryCall; - wait(argument: _google_showcase_v1beta1_WaitRequest, callback: (error?: grpc.ServiceError, result?: _google_longrunning_Operation__Output) => void): grpc.ClientUnaryCall; + wait(argument: I_google_showcase_v1beta1_WaitRequest, metadata: grpc.Metadata, options: grpc.CallOptions, callback: grpc.requestCallback<O_google_longrunning_Operation>): grpc.ClientUnaryCall; + wait(argument: I_google_showcase_v1beta1_WaitRequest, metadata: grpc.Metadata, callback: grpc.requestCallback<O_google_longrunning_Operation>): grpc.ClientUnaryCall; + wait(argument: I_google_showcase_v1beta1_WaitRequest, options: grpc.CallOptions, callback: grpc.requestCallback<O_google_longrunning_Operation>): grpc.ClientUnaryCall; + wait(argument: I_google_showcase_v1beta1_WaitRequest, callback: grpc.requestCallback<O_google_longrunning_Operation>): grpc.ClientUnaryCall; } @@ -150,53 +150,53 @@ export interface EchoHandlers extends grpc.UntypedServiceImplementation { * and then return the response or error. * This method showcases how a client handles delays or retries. */ - Block: grpc.handleUnaryCall<_google_showcase_v1beta1_BlockRequest__Output, _google_showcase_v1beta1_BlockResponse>; + Block: grpc.handleUnaryCall<O_google_showcase_v1beta1_BlockRequest, I_google_showcase_v1beta1_BlockResponse>; /** * This method, upon receiving a request on the stream, the same content will * be passed back on the stream. This method showcases bidirectional * streaming rpcs. */ - Chat: grpc.handleBidiStreamingCall<_google_showcase_v1beta1_EchoRequest__Output, _google_showcase_v1beta1_EchoResponse>; + Chat: grpc.handleBidiStreamingCall<O_google_showcase_v1beta1_EchoRequest, I_google_showcase_v1beta1_EchoResponse>; /** * This method will collect the words given to it. When the stream is closed * by the client, this method will return the a concatenation of the strings * passed to it. This method showcases client-side streaming rpcs. */ - Collect: grpc.handleClientStreamingCall<_google_showcase_v1beta1_EchoRequest__Output, _google_showcase_v1beta1_EchoResponse>; + Collect: grpc.handleClientStreamingCall<O_google_showcase_v1beta1_EchoRequest, I_google_showcase_v1beta1_EchoResponse>; /** * This method simply echos the request. This method is showcases unary rpcs. */ - Echo: grpc.handleUnaryCall<_google_showcase_v1beta1_EchoRequest__Output, _google_showcase_v1beta1_EchoResponse>; + Echo: grpc.handleUnaryCall<O_google_showcase_v1beta1_EchoRequest, I_google_showcase_v1beta1_EchoResponse>; /** * This method split the given content into words and will pass each word back * through the stream. This method showcases server-side streaming rpcs. */ - Expand: grpc.handleServerStreamingCall<_google_showcase_v1beta1_ExpandRequest__Output, _google_showcase_v1beta1_EchoResponse>; + Expand: grpc.handleServerStreamingCall<O_google_showcase_v1beta1_ExpandRequest, I_google_showcase_v1beta1_EchoResponse>; /** * This is similar to the Expand method but instead of returning a stream of * expanded words, this method returns a paged list of expanded words. */ - PagedExpand: grpc.handleUnaryCall<_google_showcase_v1beta1_PagedExpandRequest__Output, _google_showcase_v1beta1_PagedExpandResponse>; + PagedExpand: grpc.handleUnaryCall<O_google_showcase_v1beta1_PagedExpandRequest, I_google_showcase_v1beta1_PagedExpandResponse>; /** * This method will wait the requested amount of and then return. * This method showcases how a client handles a request timing out. 
*/ - Wait: grpc.handleUnaryCall<_google_showcase_v1beta1_WaitRequest__Output, _google_longrunning_Operation>; + Wait: grpc.handleUnaryCall<O_google_showcase_v1beta1_WaitRequest, I_google_longrunning_Operation>; } -export interface EchoDefinition { - Block: MethodDefinition<_google_showcase_v1beta1_BlockRequest, _google_showcase_v1beta1_BlockResponse, _google_showcase_v1beta1_BlockRequest__Output, _google_showcase_v1beta1_BlockResponse__Output> - Chat: MethodDefinition<_google_showcase_v1beta1_EchoRequest, _google_showcase_v1beta1_EchoResponse, _google_showcase_v1beta1_EchoRequest__Output, _google_showcase_v1beta1_EchoResponse__Output> - Collect: MethodDefinition<_google_showcase_v1beta1_EchoRequest, _google_showcase_v1beta1_EchoResponse, _google_showcase_v1beta1_EchoRequest__Output, _google_showcase_v1beta1_EchoResponse__Output> - Echo: MethodDefinition<_google_showcase_v1beta1_EchoRequest, _google_showcase_v1beta1_EchoResponse, _google_showcase_v1beta1_EchoRequest__Output, _google_showcase_v1beta1_EchoResponse__Output> - Expand: MethodDefinition<_google_showcase_v1beta1_ExpandRequest, _google_showcase_v1beta1_EchoResponse, _google_showcase_v1beta1_ExpandRequest__Output, _google_showcase_v1beta1_EchoResponse__Output> - PagedExpand: MethodDefinition<_google_showcase_v1beta1_PagedExpandRequest, _google_showcase_v1beta1_PagedExpandResponse, _google_showcase_v1beta1_PagedExpandRequest__Output, _google_showcase_v1beta1_PagedExpandResponse__Output> - Wait: MethodDefinition<_google_showcase_v1beta1_WaitRequest, _google_longrunning_Operation, _google_showcase_v1beta1_WaitRequest__Output, _google_longrunning_Operation__Output> +export interface EchoDefinition extends grpc.ServiceDefinition { + Block: MethodDefinition<I_google_showcase_v1beta1_BlockRequest, I_google_showcase_v1beta1_BlockResponse, O_google_showcase_v1beta1_BlockRequest, O_google_showcase_v1beta1_BlockResponse> + Chat: MethodDefinition<I_google_showcase_v1beta1_EchoRequest, I_google_showcase_v1beta1_EchoResponse, O_google_showcase_v1beta1_EchoRequest, O_google_showcase_v1beta1_EchoResponse> + Collect: MethodDefinition<I_google_showcase_v1beta1_EchoRequest, I_google_showcase_v1beta1_EchoResponse, O_google_showcase_v1beta1_EchoRequest, O_google_showcase_v1beta1_EchoResponse> + Echo: MethodDefinition<I_google_showcase_v1beta1_EchoRequest, I_google_showcase_v1beta1_EchoResponse, O_google_showcase_v1beta1_EchoRequest, O_google_showcase_v1beta1_EchoResponse> + Expand: MethodDefinition<I_google_showcase_v1beta1_ExpandRequest, I_google_showcase_v1beta1_EchoResponse, O_google_showcase_v1beta1_ExpandRequest, O_google_showcase_v1beta1_EchoResponse> + PagedExpand: MethodDefinition<I_google_showcase_v1beta1_PagedExpandRequest, I_google_showcase_v1beta1_PagedExpandResponse, O_google_showcase_v1beta1_PagedExpandRequest, O_google_showcase_v1beta1_PagedExpandResponse> + Wait: MethodDefinition<I_google_showcase_v1beta1_WaitRequest, I_google_longrunning_Operation, O_google_showcase_v1beta1_WaitRequest, O_google_longrunning_Operation> }
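The sketch below is not part of the diff; it is a minimal illustration of how the regenerated Echo types above are meant to be consumed. The relative import paths and variable names are assumptions for illustration; only the type names, fields, and enum values come from the generated files shown in this diff.

// Minimal usage sketch (assumed import paths; types come from the golden-generated files above).
import type * as grpc from '@grpc/grpc-js';
import type { IEchoRequest } from './golden-generated/google/showcase/v1beta1/EchoRequest';
import type { OEchoResponse } from './golden-generated/google/showcase/v1beta1/EchoResponse';

// Input ("I"-prefixed) message types have optional fields, and enum-typed fields
// accept either the enum name or its number, per the generated ISeverity union.
const request: IEchoRequest = { content: 'hello', severity: 2 }; // 'URGENT' would also type-check

// Output ("O"-prefixed) message types carry enum values as string literals, and
// grpc.requestCallback<T> is the callback shape used by the client overloads above.
const onEcho: grpc.requestCallback<OEchoResponse> = (error, response) => {
  if (!error && response) {
    console.log(`${response.severity}: ${response.content}`); // e.g. "URGENT: hello"
  }
};

A callback of exactly this shape can be passed to any of the Echo/echo overloads of EchoClient above.

diff --git a/packages/proto-loader/golden-generated/google/showcase/v1beta1/EchoRequest.ts b/packages/proto-loader/golden-generated/google/showcase/v1beta1/EchoRequest.ts index fb2bb67d3..a5fb8f766 100644 --- a/packages/proto-loader/golden-generated/google/showcase/v1beta1/EchoRequest.ts +++ b/packages/proto-loader/golden-generated/google/showcase/v1beta1/EchoRequest.ts @@ -1,7 +1,7 @@ // Original file: deps/gapic-showcase/schema/google/showcase/v1beta1/echo.proto -import type { Status as _google_rpc_Status, Status__Output as _google_rpc_Status__Output } from '../../../google/rpc/Status'; -import type { Severity as _google_showcase_v1beta1_Severity } from '../../../google/showcase/v1beta1/Severity'; +import type { IStatus as I_google_rpc_Status, OStatus as O_google_rpc_Status } from '../../../google/rpc/Status'; +import type { ISeverity as I_google_showcase_v1beta1_Severity, OSeverity as O_google_showcase_v1beta1_Severity } from '../../../google/showcase/v1beta1/Severity'; /** * The request message used for the Echo, Collect and Chat methods. @@ -9,7 +9,7 @@ import type { Severity as _google_showcase_v1beta1_Severity } from '../../../goo * If status is set in this message * then the status will be returned as an error. */ -export interface EchoRequest { +export interface IEchoRequest { /** * The content to be echoed by the server. */ @@ -17,11 +17,11 @@ export interface EchoRequest { /** * The error to be thrown by the server. */ - 'error'?: (_google_rpc_Status | null); + 'error'?: (I_google_rpc_Status | null); /** * The severity to be echoed by the server. 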
*/ - 'severity'?: (_google_showcase_v1beta1_Severity | keyof typeof _google_showcase_v1beta1_Severity); + 'severity'?: (I_google_showcase_v1beta1_Severity); 'response'?: "content"|"error"; } @@ -31,7 +31,7 @@ export interface EchoRequest { * If status is set in this message * then the status will be returned as an error. */ -export interface EchoRequest__Output { +export interface OEchoRequest { /** * The content to be echoed by the server. */ @@ -39,10 +39,10 @@ export interface EchoRequest__Output { /** * The error to be thrown by the server. */ - 'error'?: (_google_rpc_Status__Output | null); + 'error'?: (O_google_rpc_Status | null); /** * The severity to be echoed by the server. */ - 'severity': (keyof typeof _google_showcase_v1beta1_Severity); + 'severity': (O_google_showcase_v1beta1_Severity); 'response': "content"|"error"; } diff --git a/packages/proto-loader/golden-generated/google/showcase/v1beta1/EchoResponse.ts b/packages/proto-loader/golden-generated/google/showcase/v1beta1/EchoResponse.ts index 3fda238a1..ac50115bf 100644 --- a/packages/proto-loader/golden-generated/google/showcase/v1beta1/EchoResponse.ts +++ b/packages/proto-loader/golden-generated/google/showcase/v1beta1/EchoResponse.ts @@ -1,11 +1,11 @@ // Original file: deps/gapic-showcase/schema/google/showcase/v1beta1/echo.proto -import type { Severity as _google_showcase_v1beta1_Severity } from '../../../google/showcase/v1beta1/Severity'; +import type { ISeverity as I_google_showcase_v1beta1_Severity, OSeverity as O_google_showcase_v1beta1_Severity } from '../../../google/showcase/v1beta1/Severity'; /** * The response message for the Echo methods. */ -export interface EchoResponse { +export interface IEchoResponse { /** * The content specified in the request. */ @@ -13,13 +13,13 @@ export interface EchoResponse { /** * The severity specified in the request. */ - 'severity'?: (_google_showcase_v1beta1_Severity | keyof typeof _google_showcase_v1beta1_Severity); + 'severity'?: (I_google_showcase_v1beta1_Severity); } /** * The response message for the Echo methods. */ -export interface EchoResponse__Output { +export interface OEchoResponse { /** * The content specified in the request. */ @@ -27,5 +27,5 @@ export interface EchoResponse__Output { /** * The severity specified in the request. */ - 'severity': (keyof typeof _google_showcase_v1beta1_Severity); + 'severity': (O_google_showcase_v1beta1_Severity); } diff --git a/packages/proto-loader/golden-generated/google/showcase/v1beta1/ExpandRequest.ts b/packages/proto-loader/golden-generated/google/showcase/v1beta1/ExpandRequest.ts index 33ce73c1f..4347a617a 100644 --- a/packages/proto-loader/golden-generated/google/showcase/v1beta1/ExpandRequest.ts +++ b/packages/proto-loader/golden-generated/google/showcase/v1beta1/ExpandRequest.ts @@ -1,11 +1,11 @@ // Original file: deps/gapic-showcase/schema/google/showcase/v1beta1/echo.proto -import type { Status as _google_rpc_Status, Status__Output as _google_rpc_Status__Output } from '../../../google/rpc/Status'; +import type { IStatus as I_google_rpc_Status, OStatus as O_google_rpc_Status } from '../../../google/rpc/Status'; /** * The request message for the Expand method. */ -export interface ExpandRequest { +export interface IExpandRequest { /** * The content that will be split into words and returned on the stream. */ @@ -13,13 +13,13 @@ export interface ExpandRequest { /** * The error that is thrown after all words are sent on the stream. 
*/ - 'error'?: (_google_rpc_Status | null); + 'error'?: (I_google_rpc_Status | null); } /** * The request message for the Expand method. */ -export interface ExpandRequest__Output { +export interface OExpandRequest { /** * The content that will be split into words and returned on the stream. */ @@ -27,5 +27,5 @@ export interface ExpandRequest__Output { /** * The error that is thrown after all words are sent on the stream. */ - 'error': (_google_rpc_Status__Output | null); + 'error': (O_google_rpc_Status | null); } diff --git a/packages/proto-loader/golden-generated/google/showcase/v1beta1/PagedExpandRequest.ts b/packages/proto-loader/golden-generated/google/showcase/v1beta1/PagedExpandRequest.ts index 13c945134..8c68ba990 100644 --- a/packages/proto-loader/golden-generated/google/showcase/v1beta1/PagedExpandRequest.ts +++ b/packages/proto-loader/golden-generated/google/showcase/v1beta1/PagedExpandRequest.ts @@ -4,7 +4,7 @@ /** * The request for the PagedExpand method. */ -export interface PagedExpandRequest { +export interface IPagedExpandRequest { /** * The string to expand. */ @@ -22,7 +22,7 @@ export interface PagedExpandRequest { /** * The request for the PagedExpand method. */ -export interface PagedExpandRequest__Output { +export interface OPagedExpandRequest { /** * The string to expand. */ diff --git a/packages/proto-loader/golden-generated/google/showcase/v1beta1/PagedExpandResponse.ts b/packages/proto-loader/golden-generated/google/showcase/v1beta1/PagedExpandResponse.ts index 823de43ed..3b3ef90c2 100644 --- a/packages/proto-loader/golden-generated/google/showcase/v1beta1/PagedExpandResponse.ts +++ b/packages/proto-loader/golden-generated/google/showcase/v1beta1/PagedExpandResponse.ts @@ -1,15 +1,15 @@ // Original file: deps/gapic-showcase/schema/google/showcase/v1beta1/echo.proto -import type { EchoResponse as _google_showcase_v1beta1_EchoResponse, EchoResponse__Output as _google_showcase_v1beta1_EchoResponse__Output } from '../../../google/showcase/v1beta1/EchoResponse'; +import type { IEchoResponse as I_google_showcase_v1beta1_EchoResponse, OEchoResponse as O_google_showcase_v1beta1_EchoResponse } from '../../../google/showcase/v1beta1/EchoResponse'; /** * The response for the PagedExpand method. */ -export interface PagedExpandResponse { +export interface IPagedExpandResponse { /** * The words that were expanded. */ - 'responses'?: (_google_showcase_v1beta1_EchoResponse)[]; + 'responses'?: (I_google_showcase_v1beta1_EchoResponse)[]; /** * The next page token. */ @@ -19,11 +19,11 @@ export interface PagedExpandResponse { /** * The response for the PagedExpand method. */ -export interface PagedExpandResponse__Output { +export interface OPagedExpandResponse { /** * The words that were expanded. */ - 'responses': (_google_showcase_v1beta1_EchoResponse__Output)[]; + 'responses': (O_google_showcase_v1beta1_EchoResponse)[]; /** * The next page token. 
*/ diff --git a/packages/proto-loader/golden-generated/google/showcase/v1beta1/Severity.ts b/packages/proto-loader/golden-generated/google/showcase/v1beta1/Severity.ts index fc3fe6415..d109fe1ce 100644 --- a/packages/proto-loader/golden-generated/google/showcase/v1beta1/Severity.ts +++ b/packages/proto-loader/golden-generated/google/showcase/v1beta1/Severity.ts @@ -3,9 +3,27 @@ /** * A severity enum used to test enum capabilities in GAPIC surfaces */ -export enum Severity { - UNNECESSARY = 0, - NECESSARY = 1, - URGENT = 2, - CRITICAL = 3, -} +export const Severity = { + UNNECESSARY: 'UNNECESSARY', + NECESSARY: 'NECESSARY', + URGENT: 'URGENT', + CRITICAL: 'CRITICAL', +} as const; + +/** + * A severity enum used to test enum capabilities in GAPIC surfaces + */ +export type ISeverity = + | 'UNNECESSARY' + | 0 + | 'NECESSARY' + | 1 + | 'URGENT' + | 2 + | 'CRITICAL' + | 3 + +/** + * A severity enum used to test enum capabilities in GAPIC surfaces + */ +export type OSeverity = typeof Severity[keyof typeof Severity] diff --git a/packages/proto-loader/golden-generated/google/showcase/v1beta1/WaitMetadata.ts b/packages/proto-loader/golden-generated/google/showcase/v1beta1/WaitMetadata.ts index 5f17b4457..ddbe77c22 100644 --- a/packages/proto-loader/golden-generated/google/showcase/v1beta1/WaitMetadata.ts +++ b/packages/proto-loader/golden-generated/google/showcase/v1beta1/WaitMetadata.ts @@ -1,23 +1,23 @@ // Original file: deps/gapic-showcase/schema/google/showcase/v1beta1/echo.proto -import type { Timestamp as _google_protobuf_Timestamp, Timestamp__Output as _google_protobuf_Timestamp__Output } from '../../../google/protobuf/Timestamp'; +import type { ITimestamp as I_google_protobuf_Timestamp, OTimestamp as O_google_protobuf_Timestamp } from '../../../google/protobuf/Timestamp'; /** * The metadata for Wait operation. */ -export interface WaitMetadata { +export interface IWaitMetadata { /** * The time that this operation will complete. */ - 'end_time'?: (_google_protobuf_Timestamp | null); + 'end_time'?: (I_google_protobuf_Timestamp | null); } /** * The metadata for Wait operation. */ -export interface WaitMetadata__Output { +export interface OWaitMetadata { /** * The time that this operation will complete. 
*/ - 'end_time': (_google_protobuf_Timestamp__Output | null); + 'end_time': (O_google_protobuf_Timestamp | null); } diff --git a/packages/proto-loader/golden-generated/google/showcase/v1beta1/WaitRequest.ts b/packages/proto-loader/golden-generated/google/showcase/v1beta1/WaitRequest.ts index 46c095b65..331a66947 100644 --- a/packages/proto-loader/golden-generated/google/showcase/v1beta1/WaitRequest.ts +++ b/packages/proto-loader/golden-generated/google/showcase/v1beta1/WaitRequest.ts @@ -1,31 +1,31 @@ // Original file: deps/gapic-showcase/schema/google/showcase/v1beta1/echo.proto -import type { Timestamp as _google_protobuf_Timestamp, Timestamp__Output as _google_protobuf_Timestamp__Output } from '../../../google/protobuf/Timestamp'; -import type { Status as _google_rpc_Status, Status__Output as _google_rpc_Status__Output } from '../../../google/rpc/Status'; -import type { WaitResponse as _google_showcase_v1beta1_WaitResponse, WaitResponse__Output as _google_showcase_v1beta1_WaitResponse__Output } from '../../../google/showcase/v1beta1/WaitResponse'; -import type { Duration as _google_protobuf_Duration, Duration__Output as _google_protobuf_Duration__Output } from '../../../google/protobuf/Duration'; +import type { ITimestamp as I_google_protobuf_Timestamp, OTimestamp as O_google_protobuf_Timestamp } from '../../../google/protobuf/Timestamp'; +import type { IStatus as I_google_rpc_Status, OStatus as O_google_rpc_Status } from '../../../google/rpc/Status'; +import type { IWaitResponse as I_google_showcase_v1beta1_WaitResponse, OWaitResponse as O_google_showcase_v1beta1_WaitResponse } from '../../../google/showcase/v1beta1/WaitResponse'; +import type { IDuration as I_google_protobuf_Duration, ODuration as O_google_protobuf_Duration } from '../../../google/protobuf/Duration'; /** * The request for Wait method. */ -export interface WaitRequest { +export interface IWaitRequest { /** * The time that this operation will complete. */ - 'end_time'?: (_google_protobuf_Timestamp | null); + 'end_time'?: (I_google_protobuf_Timestamp | null); /** * The error that will be returned by the server. If this code is specified * to be the OK rpc code, an empty response will be returned. */ - 'error'?: (_google_rpc_Status | null); + 'error'?: (I_google_rpc_Status | null); /** * The response to be returned on operation completion. */ - 'success'?: (_google_showcase_v1beta1_WaitResponse | null); + 'success'?: (I_google_showcase_v1beta1_WaitResponse | null); /** * The duration of this operation. */ - 'ttl'?: (_google_protobuf_Duration | null); + 'ttl'?: (I_google_protobuf_Duration | null); 'end'?: "end_time"|"ttl"; 'response'?: "error"|"success"; } @@ -33,24 +33,24 @@ export interface WaitRequest { /** * The request for Wait method. */ -export interface WaitRequest__Output { +export interface OWaitRequest { /** * The time that this operation will complete. */ - 'end_time'?: (_google_protobuf_Timestamp__Output | null); + 'end_time'?: (O_google_protobuf_Timestamp | null); /** * The error that will be returned by the server. If this code is specified * to be the OK rpc code, an empty response will be returned. */ - 'error'?: (_google_rpc_Status__Output | null); + 'error'?: (O_google_rpc_Status | null); /** * The response to be returned on operation completion. */ - 'success'?: (_google_showcase_v1beta1_WaitResponse__Output | null); + 'success'?: (O_google_showcase_v1beta1_WaitResponse | null); /** * The duration of this operation. 
*/ - 'ttl'?: (_google_protobuf_Duration__Output | null); + 'ttl'?: (O_google_protobuf_Duration | null); 'end': "end_time"|"ttl"; 'response': "error"|"success"; } diff --git a/packages/proto-loader/golden-generated/google/showcase/v1beta1/WaitResponse.ts b/packages/proto-loader/golden-generated/google/showcase/v1beta1/WaitResponse.ts index 84b804f6b..667b450e2 100644 --- a/packages/proto-loader/golden-generated/google/showcase/v1beta1/WaitResponse.ts +++ b/packages/proto-loader/golden-generated/google/showcase/v1beta1/WaitResponse.ts @@ -4,7 +4,7 @@ /** * The result of the Wait operation. */ -export interface WaitResponse { +export interface IWaitResponse { /** * This content of the result. */ @@ -14,7 +14,7 @@ export interface WaitResponse { /** * The result of the Wait operation. */ -export interface WaitResponse__Output { +export interface OWaitResponse { /** * This content of the result. */ diff --git a/packages/proto-loader/package.json b/packages/proto-loader/package.json index ee49abc40..a7f4d159f 100644 --- a/packages/proto-loader/package.json +++ b/packages/proto-loader/package.json @@ -1,6 +1,6 @@ { "name": "@grpc/proto-loader", - "version": "0.6.1", + "version": "0.7.10", "author": "Google Inc.", "contributors": [ { @@ -14,7 +14,7 @@ "typings": "build/src/index.d.ts", "scripts": { "build": "npm run compile", - "clean": "node -e 'require(\"rimraf\")(\"./build\", () => {})'", + "clean": "rimraf ./build", "compile": "tsc -p .", "format": "clang-format -i -style=\"{Language: JavaScript, BasedOnStyle: Google, ColumnLimit: 80}\" src/*.ts test/*.ts", "lint": "tslint -c node_modules/google-ts-style/tslint.json -p . -t codeFrame --type-check", @@ -24,8 +24,8 @@ "fix": "gts fix", "pretest": "npm run compile", "posttest": "npm run check", - "generate-golden": "node ./build/bin/proto-loader-gen-types.js --keepCase --longs=String --enums=String --defaults --oneofs --json --includeComments -I deps/gapic-showcase/schema/ deps/googleapis/ -O ./golden-generated --grpcLib @grpc/grpc-js google/showcase/v1beta1/echo.proto", - "validate-golden": "rm -rf ./golden-generated-old && mv ./golden-generated/ ./golden-generated-old && npm run generate-golden && diff -r ./golden-generated ./golden-generated-old" + "generate-golden": "node ./build/bin/proto-loader-gen-types.js --keepCase --longs=String --enums=String --defaults --oneofs --json --includeComments --inputTemplate=I%s --outputTemplate=O%s -I deps/gapic-showcase/schema/ deps/googleapis/ -O ./golden-generated --grpcLib @grpc/grpc-js google/showcase/v1beta1/echo.proto", + "validate-golden": "rm -rf ./golden-generated-old && mv ./golden-generated/ ./golden-generated-old && npm run generate-golden && diff -rb ./golden-generated ./golden-generated-old" }, "repository": { "type": "git", @@ -38,29 +38,28 @@ "files": [ "LICENSE", "build/src/*.d.ts", - "build/src/*.js", - "build/bin/*.js" + "build/src/*.{js,js.map}", + "build/bin/*.{js,js.map}" ], "bin": { "proto-loader-gen-types": "./build/bin/proto-loader-gen-types.js" }, "dependencies": { - "@types/long": "^4.0.1", "lodash.camelcase": "^4.3.0", - "long": "^4.0.0", - "protobufjs": "^6.10.0", - "yargs": "^16.1.1" + "long": "^5.0.0", + "protobufjs": "^7.2.4", + "yargs": "^17.7.2" }, "devDependencies": { "@types/lodash.camelcase": "^4.3.4", "@types/mkdirp": "^1.0.1", "@types/mocha": "^5.2.7", "@types/node": "^10.17.26", - "@types/yargs": "^15.0.5", + "@types/yargs": "^17.0.24", "clang-format": "^1.2.2", - "gts": "^1.1.0", + "gts": "^3.1.0", "rimraf": "^3.0.2", - "typescript": "~3.8.3" + "typescript": 
"~4.7.4" }, "engines": { "node": ">=6" diff --git a/packages/proto-loader/src/index.ts b/packages/proto-loader/src/index.ts index 98ca97b51..d607668a9 100644 --- a/packages/proto-loader/src/index.ts +++ b/packages/proto-loader/src/index.ts @@ -22,9 +22,9 @@ import * as descriptor from 'protobufjs/ext/descriptor'; import { loadProtosWithOptionsSync, loadProtosWithOptions, Options, addCommonProtos } from './util'; -export { Long } from 'long'; +import Long = require('long'); -export { Options }; +export { Options, Long }; /** * This type exists for use with code generated by the proto-loader-gen-types @@ -212,6 +212,9 @@ function createDeserializer( function createSerializer(cls: Protobuf.Type): Serialize { return function serialize(arg: object): Buffer { + if (Array.isArray(arg)) { + throw new Error(`Failed to serialize message: expected object with ${cls.name} structure, got array instead`); + } const message = cls.fromObject(arg); return cls.encode(message).finish() as Buffer; }; diff --git a/run-tests.bat b/run-tests.bat index 1eebbd4a6..6b94b78de 100644 --- a/run-tests.bat +++ b/run-tests.bat @@ -21,15 +21,17 @@ powershell -c "[System.Environment]::OSVersion" powershell -c "Get-WmiObject -Class Win32_ComputerSystem" powershell -c "(Get-WmiObject -Class Win32_ComputerSystem).SystemType" -powershell -c "& { iwr https://raw.githubusercontent.com/grumpycoders/nvm-ps/master/nvm.ps1 | iex }" +powershell -c "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; & { iwr https://raw.githubusercontent.com/grumpycoders/nvm-ps/master/nvm.ps1 | iex }" SET PATH=%APPDATA%\nvm-ps;%APPDATA%\nvm-ps\nodejs;%PATH% SET JOBS=8 call nvm version -call nvm install 8 -call nvm use 8 +call nvm install 16 +call nvm use 16 + +git submodule update --init --recursive SET npm_config_fetch_retries=5 @@ -38,7 +40,7 @@ call npm install || goto :error SET JUNIT_REPORT_STACK=1 SET FAILED=0 -for %%v in (8 10 12) do ( +for %%v in (14 16) do ( call nvm install %%v call nvm use %%v if "%%v"=="4" ( @@ -53,7 +55,6 @@ for %%v in (8 10 12) do ( node -e "process.exit(process.version.startsWith('v%%v') ? 0 : -1)" || goto :error - call .\node_modules\.bin\gulp cleanAll || SET FAILED=1 call .\node_modules\.bin\gulp setup || SET FAILED=1 call .\node_modules\.bin\gulp test || SET FAILED=1 cmd.exe /c "SET GRPC_DNS_RESOLVER=ares& call .\node_modules\.bin\gulp nativeTestOnly" || SET FAILED=1 diff --git a/run-tests.sh b/run-tests.sh index c4bffacda..0adcc0f17 100755 --- a/run-tests.sh +++ b/run-tests.sh @@ -28,7 +28,7 @@ cd $ROOT git submodule update --init --recursive if [ ! -n "$node_versions" ] ; then - node_versions="8 10 12" + node_versions="14 16" fi set +ex @@ -46,6 +46,7 @@ export JOBS=8 export JUNIT_REPORT_STACK=1 OS=$(uname) +ARCH=$(uname -m) # TODO(mlumish): Add electron tests @@ -70,11 +71,12 @@ do node -e 'process.exit(process.version.startsWith("v'$version'") ? 0 : -1)' # Install dependencies and link packages together. 
- ./node_modules/.bin/gulp cleanAll ./node_modules/.bin/gulp setup # npm test calls nyc gulp test npm test || FAILED="true" + + ./test/distrib/run-distrib-test.sh || FAILED="true" done set +ex @@ -87,7 +89,7 @@ if [ "$FAILED" = "true" ] then exit 1 else - if [ "$OS" = "Linux" ] + if [ "$OS" = "Linux" ] && [ "$ARCH" != "aarch64" ] then # If we can't download the token file, just skip reporting coverage gsutil cp gs://grpc-testing-secrets/coveralls_credentials/grpc-node.rc /tmp || exit 0 diff --git a/test/aarch64/prepare_qemu.sh b/test/aarch64/prepare_qemu.sh new file mode 100755 index 000000000..f61320222 --- /dev/null +++ b/test/aarch64/prepare_qemu.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# +# Set up and configure the qemu userspace emulator on the kokoro worker so that we can seamlessly emulate processes running +# inside docker containers. + +set -ex + +# show pre-existing qemu registration +cat /proc/sys/fs/binfmt_misc/qemu-aarch64 + +# Kokoro ubuntu1604 workers already have the qemu-user and qemu-user-static packages installed, but it's an old version that: +# * prints warnings about some syscalls (e.g. "qemu: Unsupported syscall: 278") +# * doesn't register with binfmt_misc with the persistent ("F") flag we need (see below) +# +# To overcome the above limitations, we use the https://github.com/multiarch/qemu-user-static +# docker image to provide a new enough version of qemu-user-static and register it with +# the desired binfmt_misc flags. The most important flag we need is "F" (set by "--persistent yes"), +# which allows the qemu-aarch64-static binary to be loaded eagerly at the time of registration with binfmt_misc. +# That way, we can emulate aarch64 binaries running inside docker containers transparently, without needing the emulator +# binary to be accessible from the docker image we're emulating. +# Note that on newer distributions (such as glinux), simply "apt install qemu-user-static" is sufficient +# to install qemu-user-static with the right flags. +docker run --rm --privileged multiarch/qemu-user-static:5.2.0-2 --reset --credential yes --persistent yes + +# Print the current qemu registration to make sure everything is set up correctly. +cat /proc/sys/fs/binfmt_misc/qemu-aarch64 diff --git a/test/any_grpc.js b/test/any_grpc.js index 2f161ff7a..5dcbf0111 100644 --- a/test/any_grpc.js +++ b/test/any_grpc.js @@ -22,10 +22,10 @@ const _ = require('lodash'); function getImplementation(globalField) { - const impl = global[globalField]; + const impl = global[globalField] ?? 
'js'; if (impl === 'js') { - return require(`../packages/grpc-${impl}`); + return require('../packages/grpc-js'); } else if (impl === 'native') { return require('grpc'); } diff --git a/test/api/connectivity_test.js b/test/api/connectivity_test.js index b5d31943d..64764d7d4 100644 --- a/test/api/connectivity_test.js +++ b/test/api/connectivity_test.js @@ -61,7 +61,9 @@ const serviceImpl = { describe(`${anyGrpc.clientName} client -> ${anyGrpc.serverName} server`, function() { it('client should not wait for ready by default', function(done) { this.timeout(15000); - const disconnectedClient = new TestServiceClient('foo.test.google.com:50051', clientGrpc.credentials.createInsecure()); + /* TCP port 47 is reserved according to + * https://en.wikipedia.org/wiki/List_of_TCP_and_UDP_port_numbers */ + const disconnectedClient = new TestServiceClient('localhost:47', clientGrpc.credentials.createInsecure()); const deadline = new Date(); deadline.setSeconds(deadline.getSeconds() + 10); disconnectedClient.unary({}, {deadline: deadline}, (error, value) =>{ @@ -72,7 +74,7 @@ describe(`${anyGrpc.clientName} client -> ${anyGrpc.serverName} server`, functio }); it('client should wait for a connection with waitForReady on', function(done) { this.timeout(15000); - const disconnectedClient = new TestServiceClient('foo.test.google.com:50051', clientGrpc.credentials.createInsecure()); + const disconnectedClient = new TestServiceClient('localhost:47', clientGrpc.credentials.createInsecure()); const metadata = new clientGrpc.Metadata({waitForReady: true}); const deadline = new Date(); deadline.setSeconds(deadline.getSeconds() + 10); diff --git a/test/api/error_test.js b/test/api/error_test.js index a99619fbd..4dbf1ada1 100644 --- a/test/api/error_test.js +++ b/test/api/error_test.js @@ -341,7 +341,7 @@ describe(`${anyGrpc.clientName} client -> ${anyGrpc.serverName} server`, functio after(function() { server.forceShutdown(); }); - describe('Server recieving bad input', function() { + describe('Server receiving bad input', function() { var misbehavingClient; var badArg = Buffer.from([0xFF]); before(function() { diff --git a/test/api/interop_extra_test.js b/test/api/interop_extra_test.js index f8db35b38..fa7e03276 100644 --- a/test/api/interop_extra_test.js +++ b/test/api/interop_extra_test.js @@ -147,7 +147,8 @@ describe(`${anyGrpc.clientName} client -> ${anyGrpc.serverName} server`, functio }); }); it('should receive all messages in a long stream', function(done) { - this.timeout(20000); + // the test is slow under aarch64 emulator + this.timeout(80000); var arg = { response_type: 'COMPRESSABLE', response_parameters: [ @@ -197,6 +198,8 @@ describe(`${anyGrpc.clientName} client -> ${anyGrpc.serverName} server`, functio }); }); describe('max message size', function() { + // with the default timeout the test times out under aarch64 emulator + this.timeout(6000); // A size that is larger than the default limit const largeMessageSize = 8 * 1024 * 1024; const largeMessage = Buffer.alloc(largeMessageSize); @@ -238,6 +241,8 @@ describe(`${anyGrpc.clientName} client -> ${anyGrpc.serverName} server`, functio }); }); describe('with a client with no message size limits', function() { + // with the default timeout the test times out under aarch64 emulator + this.timeout(6000); let unrestrictedClient; before(function() { const ca_path = path.join(__dirname, '../data/ca.pem'); @@ -283,6 +288,8 @@ describe(`${anyGrpc.clientName} client -> ${anyGrpc.serverName} server`, functio }); }); describe('with a server with message size 
limits and a client without limits', function() { + // with the default timeout the test times out under aarch64 emulator + this.timeout(6000); let restrictedServer; let restrictedServerClient; let restrictedServerClient2; diff --git a/test/api/interop_sanity_test.js b/test/api/interop_sanity_test.js index 995650e20..c1e5d92b5 100644 --- a/test/api/interop_sanity_test.js +++ b/test/api/interop_sanity_test.js @@ -48,7 +48,8 @@ var childExecArgv = []; describe(`${anyGrpc.clientName} client -> ${anyGrpc.serverName} server`, function() { describe('Interop tests', function() { - this.timeout(4000); + // with the default timeout the test times out under aarch64 emulator + this.timeout(10000); before(function(done) { for (let arg of process.argv) { if (arg.startsWith('--require=')) { diff --git a/test/channelz/channelz_manual_test.js b/test/channelz/channelz_manual_test.js new file mode 100644 index 000000000..2f77df3cc --- /dev/null +++ b/test/channelz/channelz_manual_test.js @@ -0,0 +1,73 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +'use strict'; + +require('../fixtures/js_js'); +const interopClient = require('../interop/interop_client'); +const interopServer = require('../interop/interop_server'); +const serverGrpc = require('../any_grpc').server; + +const hostOverride = 'foo.test.google.fr'; + +const testCases = [ + 'empty_unary', + 'large_unary', + 'client_streaming', + 'server_streaming', + 'ping_pong', + 'empty_stream', + 'cancel_after_begin', + 'cancel_after_first_response', + 'timeout_on_sleeping_server', + 'custom_metadata', + 'status_code_and_message', + 'special_status_message', + 'unimplemented_service', + 'unimplemented_method' +]; + +function getRandomTest() { + return testCases[(Math.random() * testCases.length) | 0]; +} + +let testCompleteCount = 0; + +interopServer.getServer('0', true, (error, result) => { + if (error) { + throw error; + } + const channelzServer = new serverGrpc.Server(); + channelzServer.bindAsync('localhost:0', serverGrpc.ServerCredentials.createInsecure(), (error, port) => { + if (error) { + throw error; + } + console.log(`Serving channelz at port ${port}`); + serverGrpc.addAdminServicesToServer(channelzServer); + channelzServer.start(); + result.server.start(); + setInterval(() => { + interopClient.runTest(`localhost:${result.port}`, hostOverride, getRandomTest(), true, true, () => { + testCompleteCount += 1; + if (testCompleteCount % 100 === 0) { + console.log(`Completed ${testCompleteCount} tests`); + } + }); + }, 100); + }); +}) \ No newline at end of file diff --git a/test/distrib/distrib-test.js b/test/distrib/distrib-test.js new file mode 100644 index 000000000..708c93eb8 --- /dev/null +++ b/test/distrib/distrib-test.js @@ -0,0 +1,22 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +const grpcJs = require('@grpc/grpc-js'); + +const grpcJsXds = require('@grpc/grpc-js-xds'); + +const protoLoader = require('@grpc/proto-loader'); diff --git a/test/distrib/run-distrib-test.sh b/test/distrib/run-distrib-test.sh new file mode 100755 index 000000000..e2ed0853c --- /dev/null +++ b/test/distrib/run-distrib-test.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# Copyright 2022 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -ex + +cd $(dirname $0) +base=$(pwd) + +cd ../../packages/grpc-js +npm pack +cd ../grpc-js-xds +npm pack +cd ../proto-loader +npm pack + +cd $base +npm install ../../packages/grpc-js/grpc-grpc-js-*.tgz +npm install ../../packages/grpc-js-xds/grpc-grpc-js-xds-*.tgz +npm install ../../packages/proto-loader/grpc-proto-loader-*.tgz + +node ./distrib-test.js diff --git a/test/gulpfile.ts b/test/gulpfile.ts index 2024f02cb..6d43d4472 100644 --- a/test/gulpfile.ts +++ b/test/gulpfile.ts @@ -25,6 +25,10 @@ import * as semver from 'semver'; const testDir = __dirname; const apiTestDir = path.resolve(testDir, 'api'); +/* The native library has some misbehavior in specific tests when running in + * Node 14 and above. */ +const NATIVE_SUPPORT_RANGE = '<14'; + const runInstall = () => { return execa('npm', ['install'], {cwd: testDir, stdio: 'inherit'}); }; @@ -51,11 +55,11 @@ const testJsClientNativeServer = runTestsWithFixture('native', 'js'); const testNativeClientJsServer = runTestsWithFixture('js', 'native'); const testJsClientJsServer = runTestsWithFixture('js', 'js'); -const test = gulp.series( +const test = semver.satisfies(process.version, NATIVE_SUPPORT_RANGE)? gulp.series( testJsClientJsServer, testJsClientNativeServer, testNativeClientJsServer - ); + ) : testJsClientJsServer; export { install, diff --git a/test/interop/interop_client.js b/test/interop/interop_client.js index d7df128da..61390c524 100644 --- a/test/interop/interop_client.js +++ b/test/interop/interop_client.js @@ -484,7 +484,7 @@ function getApplicationCreds(scope, callback) { } function getOauth2Creds(scope, callback) { - (new GoogleAuth()).getAccessToken().then((token) => { + (new GoogleAuth({scopes: scope})).getAccessToken().then((token) => { var updateMd = function(service_url, callback) { var metadata = new grpc.Metadata(); metadata.add('authorization', 'Bearer ' + token); @@ -550,7 +550,7 @@ exports.test_cases = test_cases; * Execute a single test case. 
* @param {string} address The address of the server to connect to, in the * format 'hostname:port' - * @param {string} host_overrirde The hostname of the server to use as an SSL + * @param {string} host_override The hostname of the server to use as an SSL * override * @param {string} test_case The name of the test case to run * @param {bool} tls Indicates that a secure channel should be used diff --git a/test/interop/interop_server.js b/test/interop/interop_server.js index cf7ae354e..b67ec5a8a 100644 --- a/test/interop/interop_server.js +++ b/test/interop/interop_server.js @@ -200,7 +200,7 @@ function handleHalfDuplex(call) { * Get a server object bound to the given port * @param {string} port Port to which to bind * @param {boolean} tls Indicates that the bound port should use TLS - * @param {function(Error, {{server: Server, port: number}})} callback Callback + * @param {function(Error, {server: Server, port: number})} callback Callback * to call with result or error * @param {object?} options Optional additional options to use when * constructing the server diff --git a/test/kokoro/linux.cfg b/test/kokoro/linux.cfg index f40e6db43..63f88d399 100644 --- a/test/kokoro/linux.cfg +++ b/test/kokoro/linux.cfg @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - # Config file for Kokoro (in protobuf text format) # Location of the continuous shell script in repository. @@ -21,4 +20,4 @@ action { define_artifacts { regex: "github/grpc-node/reports/**/sponge_log.xml" } -} +} \ No newline at end of file diff --git a/test/kokoro/linux_aarch64.cfg b/test/kokoro/linux_aarch64.cfg new file mode 100644 index 000000000..638748ab8 --- /dev/null +++ b/test/kokoro/linux_aarch64.cfg @@ -0,0 +1,24 @@ +# Copyright 2017 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Config file for Kokoro (in protobuf text format) + +# Location of the continuous shell script in repository. +build_file: "grpc-node/test/kokoro_linux_aarch64.sh" +timeout_mins: 60 +action { + define_artifacts { + regex: "github/grpc-node/reports/**/sponge_log.xml" + } +} diff --git a/test/kokoro/xds-interop.cfg b/test/kokoro/xds-v3-interop.cfg similarity index 88% rename from test/kokoro/xds-interop.cfg rename to test/kokoro/xds-v3-interop.cfg index 866cb4b58..75377fe16 100644 --- a/test/kokoro/xds-interop.cfg +++ b/test/kokoro/xds-v3-interop.cfg @@ -1,4 +1,4 @@ -# Copyright 2017 gRPC authors. +# Copyright 2021 gRPC authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ # Config file for Kokoro (in protobuf text format) # Location of the continuous shell script in repository. 
-build_file: "grpc-node/packages/grpc-js-xds/scripts/xds.sh" +build_file: "grpc-node/packages/grpc-js-xds/scripts/xds-v3.sh" timeout_mins: 360 action { define_artifacts { diff --git a/test/kokoro/xds_k8s_lb.cfg b/test/kokoro/xds_k8s_lb.cfg new file mode 100644 index 000000000..3efb62f29 --- /dev/null +++ b/test/kokoro/xds_k8s_lb.cfg @@ -0,0 +1,30 @@ +# Copyright 2022 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Config file for Kokoro (in protobuf text format) + +# Location of the continuous shell script in repository. +build_file: "grpc-node/packages/grpc-js-xds/scripts/psm-interop-test-node.sh" +timeout_mins: 180 +action { + define_artifacts { + regex: "artifacts/**/*sponge_log.xml" + regex: "artifacts/**/*.log" + strip_prefix: "artifacts" + } +} +env_vars { + key: "PSM_TEST_SUITE" + value: "lb" +} diff --git a/test/kokoro/xds_k8s_url_map.cfg b/test/kokoro/xds_k8s_url_map.cfg new file mode 100644 index 000000000..bb6e6baf1 --- /dev/null +++ b/test/kokoro/xds_k8s_url_map.cfg @@ -0,0 +1,30 @@ +# Copyright 2022 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Config file for Kokoro (in protobuf text format) + +# Location of the continuous shell script in repository. +build_file: "grpc-node/packages/grpc-js-xds/scripts/psm-interop-test-node.sh" +timeout_mins: 180 +action { + define_artifacts { + regex: "artifacts/**/*sponge_log.xml" + regex: "artifacts/**/*.log" + strip_prefix: "artifacts" + } +} +env_vars { + key: "PSM_TEST_SUITE" + value: "url_map" +} diff --git a/test/kokoro_linux_aarch64.sh b/test/kokoro_linux_aarch64.sh new file mode 100755 index 000000000..508881e5f --- /dev/null +++ b/test/kokoro_linux_aarch64.sh @@ -0,0 +1,46 @@ +#!/bin/bash +# Copyright 2021 The gRPC Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -ex +cd $(dirname $0)/.. + +test/aarch64/prepare_qemu.sh + +# better update submodules here. We could update the submodule when running +# under an emulator as well, but it comes with performance penalty. 
+git submodule update --init --recursive + +if [[ -t 0 ]]; then + DOCKER_TTY_ARGS="-it" +else + # The input device on kokoro is not a TTY, so -it does not work. + DOCKER_TTY_ARGS= +fi + +# the test command to run under an emulated aarch64 docker container. +# we only run tests for a single version of node, since tests under an emulator are significantly slower. +TEST_NODE_COMMAND="node_versions='12' ./run-tests.sh" + +# use an actual aarch64 docker image (with a real aarch64 node) to build & test grpc-js under an emulator +# * mount the repository root as /work to be able to access the workspace files +# * to avoid running the process inside docker as root (which can pollute the workspace with files owned by root), we force +# running under the current user's UID and GID. To be able to do that, we need to provide a home directory for the user, +# otherwise the UID would be homeless under the docker container (which can lead to various issues). For simplicity, +# we just map the user's home to a throwaway temporary directory. +# TODO(jtattermusch): we're using arm64v8/node:12-stretch instead of arm64v8/node:12-buster because the buster-based image +# has a newer version of ssl that considers some of the ssl keys used for testing too short, making the tests +# fail with "error:140AB18F:SSL routines:SSL_CTX_use_certificate:ee key too small". +# See https://github.com/grpc/grpc-node/issues/1795 +docker run $DOCKER_TTY_ARGS --rm --user "$(id -u):$(id -g)" -e "HOME=/home/fake-user" -v "$(mktemp -d):/home/fake-user" -v "$(pwd)":/work -w /work arm64v8/node:12-stretch bash -c "$TEST_NODE_COMMAND" diff --git a/test/package.json b/test/package.json index 4f42f7bf7..963093bc0 100644 --- a/test/package.json +++ b/test/package.json @@ -16,8 +16,10 @@ "dependencies": { "express": "^4.16.3", "google-auth-library": "^6.1.0", - "grpc": "^1.24.2", "lodash": "^4.17.4", "poisson-process": "^1.0.0" + }, + "optionalDependencies": { + "grpc": "^1.24.2" } } diff --git a/test/performance/benchmark_client.js b/test/performance/benchmark_client.js index 42605a2fd..7b2a689a7 100644 --- a/test/performance/benchmark_client.js +++ b/test/performance/benchmark_client.js @@ -328,7 +328,7 @@ BenchmarkClient.prototype.startPoisson = function( }; /** - * Return curent statistics for the client. If reset is set, restart + * Return current statistics for the client. If reset is set, restart * statistic collection. * @param {boolean} reset Indicates that statistics should be reset * @return {object} Client statistics diff --git a/test/performance/benchmark_client_express.js b/test/performance/benchmark_client_express.js index f8be6d45e..b21346399 100644 --- a/test/performance/benchmark_client_express.js +++ b/test/performance/benchmark_client_express.js @@ -243,7 +243,7 @@ BenchmarkClient.prototype.startPoisson = function( }; /** - * Return curent statistics for the client. If reset is set, restart + * Return current statistics for the client. If reset is set, restart * statistic collection. * @param {boolean} reset Indicates that statistics should be reset * @return {object} Client statistics diff --git a/test/performance/benchmark_server.js b/test/performance/benchmark_server.js index 64128b9d0..0d6c7178c 100644 --- a/test/performance/benchmark_server.js +++ b/test/performance/benchmark_server.js @@ -154,8 +154,9 @@ util.inherits(BenchmarkServer, EventEmitter); * Start the benchmark server.
*/ BenchmarkServer.prototype.start = function() { - this.server.bindAsync(this.host + ':' + this.port, this.creds, (err) => { + this.server.bindAsync(this.host + ':' + this.port, this.creds, (err, port) => { assert.ifError(err); + this.port = port; this.server.start(); this.last_wall_time = process.hrtime(); this.last_usage = process.cpuUsage(); diff --git a/test/performance/driver.js b/test/performance/driver.js new file mode 100644 index 000000000..9ecc4bc21 --- /dev/null +++ b/test/performance/driver.js @@ -0,0 +1,96 @@ +const grpc = require('../any_grpc').server; +const protoLoader = require('../../packages/proto-loader'); +const protoPackage = protoLoader.loadSync( + 'src/proto/grpc/testing/worker_service.proto', + {keepCase: true, + defaults: true, + enums: String, + oneofs: true, + includeDirs: [__dirname + '/../proto/']}); +const serviceProto = grpc.loadPackageDefinition(protoPackage).grpc.testing; + +function main() { + const parseArgs = require('minimist'); + const argv = parseArgs(process.argv, { + string: ['client_worker_port', 'server_worker_port'] + }); + const clientWorker = new serviceProto.WorkerService(`localhost:${argv.client_worker_port}`, grpc.credentials.createInsecure()); + const serverWorker = new serviceProto.WorkerService(`localhost:${argv.server_worker_port}`, grpc.credentials.createInsecure()); + const serverWorkerStream = serverWorker.runServer(); + const clientWorkerStream = clientWorker.runClient(); + let firstServerResponseReceived = false; + let markCount = 0; + serverWorkerStream.on('data', (response) => { + console.log('Server stats:', response.stats); + if (!firstServerResponseReceived) { + firstServerResponseReceived = true; + clientWorkerStream.write({ + setup: { + server_targets: [`localhost:${response.port}`], + client_channels: 1, + outstanding_rpcs_per_channel: 1, + histogram_params: { + resolution: 0.01, + max_possible:60000000000 + }, + payload_config: { + bytebuf_params: { + req_size: 10, + resp_size: 10 + } + }, + load_params: { + closed_loop: {} + } + } + }); + clientWorkerStream.on('status', (status) => { + console.log('Received client worker status ' + JSON.stringify(status)); + serverWorkerStream.end(); + }); + const markInterval = setInterval(() => { + if (markCount >= 5) { + clientWorkerStream.end(); + clearInterval(markInterval); + } else { + clientWorkerStream.write({ + mark: {} + }); + serverWorkerStream.write({ + mark: {} + }); + } + markCount += 1; + }, 1000); + } + }); + clientWorkerStream.on('data', (response) => { + console.log('Client stats:', response.stats); + }); + serverWorkerStream.write({ + setup: { + port: 0 + } + }); + serverWorkerStream.on('status', (status) => { + console.log('Received server worker status ' + JSON.stringify(status)); + clientWorker.quitWorker({}, (error, response) => { + if (error) { + console.log('Received error on clientWorker.quitWorker:', error); + } else { + console.log('Received response from clientWorker.quitWorker'); + } + }); + serverWorker.quitWorker({}, (error, response) => { + if (error) { + console.log('Received error on serverWorker.quitWorker:', error); + } else { + console.log('Received response from serverWorker.quitWorker'); + } + }); + }); +} + +if (require.main === module) { + main(); +} diff --git a/test/performance/histogram.js b/test/performance/histogram.js index a03f2c13a..717988967 100644 --- a/test/performance/histogram.js +++ b/test/performance/histogram.js @@ -95,7 +95,7 @@ Histogram.prototype.mean = function() { }; /** - * Get the variance of all added values. 
Used to calulate the standard deviation + * Get the variance of all added values. Used to calculate the standard deviation * @return {number} The variance */ Histogram.prototype.variance = function() { diff --git a/test/performance/worker.js b/test/performance/worker.js index 86f17df2a..786d70560 100644 --- a/test/performance/worker.js +++ b/test/performance/worker.js @@ -39,13 +39,13 @@ function runServer(port, benchmark_impl, callback) { server.addService(serviceProto.WorkerService.service, new WorkerServiceImpl(benchmark_impl, server)); var address = '0.0.0.0:' + port; - server.bindAsync(address, server_creds, (err) => { + server.bindAsync(address, server_creds, (err, port) => { if (err) { return callback(err); } server.start(); - console.log('running QPS worker on %s', address); + console.log('running QPS worker on 0.0.0.0:%s', port); callback(null, server); }); } diff --git a/test/performance/worker_service_impl.js b/test/performance/worker_service_impl.js index a73d77efc..30016aa71 100644 --- a/test/performance/worker_service_impl.js +++ b/test/performance/worker_service_impl.js @@ -41,7 +41,12 @@ module.exports = function WorkerServiceImpl(benchmark_impl, server) { this.quitWorker = function quitWorker(call, callback) { callback(null, {}); - server.tryShutdown(function() {}); + /* Due to https://github.com/nodejs/node/issues/42713, tryShutdown acts + * like forceShutdown on some Node versions. So, delay calling tryShutdown + * until after done handling this request. */ + setTimeout(() => { + server.tryShutdown(function() {}); + }, 10); }; this.runClient = function runClient(call) { diff --git a/tools/release/native/Dockerfile b/tools/release/native/Dockerfile index bc03bf178..18198b8f3 100644 --- a/tools/release/native/Dockerfile +++ b/tools/release/native/Dockerfile @@ -1,12 +1,21 @@ -FROM debian:jessie +# NOTE: We don't have to worry about glibc versions +# because we use static linking during the +# compile step. +# (See packages/grpc-tools/CMakeLists.txt#L25) + +FROM ubuntu:22.04 -RUN echo "deb http://archive.debian.org/debian jessie-backports main" > /etc/apt/sources.list.d/backports.list -RUN echo 'Acquire::Check-Valid-Until "false";' > /etc/apt/apt.conf -RUN sed -i '/deb http:\/\/deb.debian.org\/debian jessie-updates main/d' /etc/apt/sources.list RUN apt-get update -RUN apt-get -t jessie-backports install -y cmake -RUN apt-get install -y curl build-essential python libc6-dev-i386 lib32stdc++-4.9-dev jq -RUN curl -fsSL get.docker.com | bash +RUN apt-get install -y cmake curl build-essential \ + python3 libc6-dev-i386-cross libc6-dev-amd64-cross \ + libc6-dev-arm64-cross lib32stdc++6-amd64-cross jq \ + lib32stdc++6-x32-cross libstdc++6-amd64-cross \ + libstdc++6-arm64-cross libstdc++6-i386-cross \ + gcc-i686-linux-gnu g++-i686-linux-gnu tar file \ + gcc-x86-64-linux-gnu g++-x86-64-linux-gnu binutils \ + gcc-aarch64-linux-gnu g++-aarch64-linux-gnu make \ + gcc g++ gzip bash libc6-amd64-i386-cross \ + libc6-dev-amd64-i386-cross RUN mkdir /usr/local/nvm ENV NVM_DIR /usr/local/nvm